Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Makefile50
-rw-r--r--drivers/net/af_packet/Makefile7
-rw-r--r--drivers/net/af_packet/rte_eth_af_packet.c176
-rw-r--r--drivers/net/ark/Makefile66
-rw-r--r--drivers/net/ark/ark_ddm.c151
-rw-r--r--drivers/net/ark/ark_ddm.h177
-rw-r--r--drivers/net/ark/ark_ethdev.c1001
-rw-r--r--drivers/net/ark/ark_ethdev.h41
-rw-r--r--drivers/net/ark/ark_ethdev_rx.c673
-rw-r--r--drivers/net/ark/ark_ethdev_rx.h65
-rw-r--r--drivers/net/ark/ark_ethdev_tx.c468
-rw-r--r--drivers/net/ark/ark_ethdev_tx.h59
-rw-r--r--drivers/net/ark/ark_ext.h115
-rw-r--r--drivers/net/ark/ark_global.h161
-rw-r--r--drivers/net/ark/ark_logs.h119
-rw-r--r--drivers/net/ark/ark_mpu.c181
-rw-r--r--drivers/net/ark/ark_mpu.h154
-rw-r--r--drivers/net/ark/ark_pktchkr.c474
-rw-r--r--drivers/net/ark/ark_pktchkr.h117
-rw-r--r--drivers/net/ark/ark_pktdir.c80
-rw-r--r--drivers/net/ark/ark_pktdir.h70
-rw-r--r--drivers/net/ark/ark_pktgen.c496
-rw-r--r--drivers/net/ark/ark_pktgen.h108
-rw-r--r--drivers/net/ark/ark_rqp.c97
-rw-r--r--drivers/net/ark/ark_rqp.h86
-rw-r--r--drivers/net/ark/ark_udm.c226
-rw-r--r--drivers/net/ark/ark_udm.h192
-rw-r--r--drivers/net/ark/rte_pmd_ark_version.map4
-rw-r--r--drivers/net/avp/Makefile57
-rw-r--r--drivers/net/avp/avp_ethdev.c2312
-rw-r--r--drivers/net/avp/avp_logs.h59
-rw-r--r--drivers/net/avp/rte_avp_common.h432
-rw-r--r--drivers/net/avp/rte_avp_fifo.h169
-rw-r--r--drivers/net/avp/rte_pmd_avp_version.map4
-rw-r--r--drivers/net/bnx2x/Makefile4
-rw-r--r--drivers/net/bnx2x/bnx2x.c4
-rw-r--r--drivers/net/bnx2x/bnx2x.h32
-rw-r--r--drivers/net/bnx2x/bnx2x_ethdev.c93
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.c4
-rw-r--r--drivers/net/bnx2x/bnx2x_rxtx.h2
-rw-r--r--drivers/net/bnx2x/ecore_sp.c2
-rw-r--r--drivers/net/bnx2x/elink.c3
-rw-r--r--drivers/net/bnxt/Makefile6
-rw-r--r--drivers/net/bnxt/bnxt_cpr.h13
-rw-r--r--drivers/net/bnxt/bnxt_ethdev.c117
-rw-r--r--drivers/net/bnxt/bnxt_hwrm.c8
-rw-r--r--drivers/net/bnxt/bnxt_irq.c3
-rw-r--r--drivers/net/bnxt/bnxt_ring.c16
-rw-r--r--drivers/net/bnxt/bnxt_txr.c2
-rw-r--r--drivers/net/bnxt/bnxt_txr.h6
-rw-r--r--drivers/net/bonding/Makefile9
-rw-r--r--drivers/net/bonding/rte_eth_bond_8023ad.c13
-rw-r--r--drivers/net/bonding/rte_eth_bond_api.c185
-rw-r--r--drivers/net/bonding/rte_eth_bond_args.c14
-rw-r--r--drivers/net/bonding/rte_eth_bond_pmd.c240
-rw-r--r--drivers/net/bonding/rte_eth_bond_private.h5
-rw-r--r--drivers/net/cxgbe/Makefile5
-rw-r--r--drivers/net/cxgbe/base/adapter.h44
-rw-r--r--drivers/net/cxgbe/base/t4_hw.c3
-rw-r--r--drivers/net/cxgbe/cxgbe_compat.h8
-rw-r--r--drivers/net/cxgbe/cxgbe_ethdev.c36
-rw-r--r--drivers/net/cxgbe/cxgbe_main.c8
-rw-r--r--drivers/net/cxgbe/sge.c22
-rw-r--r--drivers/net/dpaa2/Makefile70
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni.c344
-rw-r--r--drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h257
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.c1035
-rw-r--r--drivers/net/dpaa2/dpaa2_ethdev.h83
-rw-r--r--drivers/net/dpaa2/dpaa2_rxtx.c422
-rw-r--r--drivers/net/dpaa2/mc/dpni.c739
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpkg.h184
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni.h1217
-rw-r--r--drivers/net/dpaa2/mc/fsl_dpni_cmd.h334
-rw-r--r--drivers/net/dpaa2/mc/fsl_net.h487
-rw-r--r--drivers/net/dpaa2/rte_pmd_dpaa2_version.map (renamed from drivers/net/mpipe/rte_pmd_mpipe_version.map)3
-rw-r--r--drivers/net/e1000/Makefile8
-rw-r--r--drivers/net/e1000/base/README25
-rw-r--r--drivers/net/e1000/base/e1000_82575.c1
-rw-r--r--drivers/net/e1000/base/e1000_82575.h1
-rw-r--r--drivers/net/e1000/base/e1000_api.c19
-rw-r--r--drivers/net/e1000/base/e1000_defines.h9
-rw-r--r--drivers/net/e1000/base/e1000_hw.h21
-rw-r--r--drivers/net/e1000/base/e1000_ich8lan.c865
-rw-r--r--drivers/net/e1000/base/e1000_ich8lan.h21
-rw-r--r--drivers/net/e1000/base/e1000_mbx.c36
-rw-r--r--drivers/net/e1000/base/e1000_nvm.c1
-rw-r--r--drivers/net/e1000/base/e1000_osdep.h18
-rw-r--r--drivers/net/e1000/base/e1000_regs.h7
-rw-r--r--drivers/net/e1000/base/e1000_vf.c3
-rw-r--r--drivers/net/e1000/e1000_ethdev.h21
-rw-r--r--drivers/net/e1000/em_ethdev.c149
-rw-r--r--drivers/net/e1000/em_rxtx.c106
-rw-r--r--drivers/net/e1000/igb_ethdev.c362
-rw-r--r--drivers/net/e1000/igb_pf.c8
-rw-r--r--drivers/net/e1000/igb_rxtx.c239
-rw-r--r--drivers/net/ena/Makefile5
-rw-r--r--drivers/net/ena/base/ena_com.c6
-rw-r--r--drivers/net/ena/base/ena_plat_dpdk.h15
-rw-r--r--drivers/net/ena/ena_ethdev.c162
-rw-r--r--drivers/net/ena/ena_ethdev.h1
-rw-r--r--drivers/net/enic/Makefile6
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_compat.h27
-rw-r--r--drivers/net/enic/enic_ethdev.c51
-rw-r--r--drivers/net/enic/enic_main.c33
-rw-r--r--drivers/net/enic/enic_rxtx.c132
-rw-r--r--drivers/net/fm10k/Makefile10
-rw-r--r--drivers/net/fm10k/base/fm10k_common.c11
-rw-r--r--drivers/net/fm10k/base/fm10k_mbx.c10
-rw-r--r--drivers/net/fm10k/base/fm10k_mbx.h2
-rw-r--r--drivers/net/fm10k/base/fm10k_osdep.h34
-rw-r--r--drivers/net/fm10k/base/fm10k_pf.c140
-rw-r--r--drivers/net/fm10k/base/fm10k_pf.h2
-rw-r--r--drivers/net/fm10k/base/fm10k_tlv.c16
-rw-r--r--drivers/net/fm10k/base/fm10k_type.h49
-rw-r--r--drivers/net/fm10k/base/fm10k_vf.c24
-rw-r--r--drivers/net/fm10k/fm10k.h10
-rw-r--r--drivers/net/fm10k/fm10k_ethdev.c194
-rw-r--r--drivers/net/fm10k/fm10k_rxtx.c56
-rw-r--r--drivers/net/fm10k/fm10k_rxtx_vec.c29
-rw-r--r--drivers/net/i40e/Makefile16
-rw-r--r--drivers/net/i40e/base/README59
-rw-r--r--drivers/net/i40e/base/i40e_adminq.c8
-rw-r--r--drivers/net/i40e/base/i40e_adminq.h4
-rw-r--r--drivers/net/i40e/base/i40e_adminq_cmd.h180
-rw-r--r--drivers/net/i40e/base/i40e_common.c802
-rw-r--r--drivers/net/i40e/base/i40e_dcb.c2
-rw-r--r--drivers/net/i40e/base/i40e_devids.h3
-rw-r--r--drivers/net/i40e/base/i40e_lan_hmc.c5
-rw-r--r--drivers/net/i40e/base/i40e_nvm.c52
-rw-r--r--drivers/net/i40e/base/i40e_osdep.h10
-rw-r--r--drivers/net/i40e/base/i40e_prototype.h59
-rw-r--r--drivers/net/i40e/base/i40e_register.h2
-rw-r--r--drivers/net/i40e/base/i40e_type.h174
-rw-r--r--drivers/net/i40e/base/i40e_virtchnl.h7
-rw-r--r--drivers/net/i40e/i40e_ethdev.c1852
-rw-r--r--drivers/net/i40e/i40e_ethdev.h283
-rw-r--r--drivers/net/i40e/i40e_ethdev_vf.c235
-rw-r--r--drivers/net/i40e/i40e_fdir.c138
-rw-r--r--drivers/net/i40e/i40e_flow.c2258
-rw-r--r--drivers/net/i40e/i40e_logs.h17
-rw-r--r--drivers/net/i40e/i40e_pf.c476
-rw-r--r--drivers/net/i40e/i40e_pf.h4
-rw-r--r--drivers/net/i40e/i40e_rxtx.c272
-rw-r--r--drivers/net/i40e/i40e_rxtx.h29
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_altivec.c645
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_common.h23
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_neon.c91
-rw-r--r--drivers/net/i40e/i40e_rxtx_vec_sse.c92
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.c1937
-rw-r--r--drivers/net/i40e/rte_pmd_i40e.h590
-rw-r--r--drivers/net/i40e/rte_pmd_i40e_version.map36
-rw-r--r--drivers/net/ixgbe/Makefile11
-rw-r--r--drivers/net/ixgbe/base/README5
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.c4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82598.h2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.c4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_82599.h2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.c10
-rw-r--r--drivers/net/ixgbe/base/ixgbe_api.h4
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.c68
-rw-r--r--drivers/net/ixgbe/base/ixgbe_common.h7
-rw-r--r--drivers/net/ixgbe/base/ixgbe_hv_vf.c240
-rw-r--r--drivers/net/ixgbe/base/ixgbe_hv_vf.h41
-rw-r--r--drivers/net/ixgbe/base/ixgbe_mbx.h8
-rw-r--r--drivers/net/ixgbe/base/ixgbe_osdep.h15
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.c207
-rw-r--r--drivers/net/ixgbe/base/ixgbe_phy.h74
-rw-r--r--drivers/net/ixgbe/base/ixgbe_type.h143
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.c20
-rw-r--r--drivers/net/ixgbe/base/ixgbe_vf.h3
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.c33
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x540.h2
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.c1148
-rw-r--r--drivers/net/ixgbe/base/ixgbe_x550.h6
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.c1973
-rw-r--r--drivers/net/ixgbe/ixgbe_ethdev.h287
-rw-r--r--drivers/net/ixgbe/ixgbe_fdir.c415
-rw-r--r--drivers/net/ixgbe/ixgbe_flow.c2778
-rw-r--r--drivers/net/ixgbe/ixgbe_pf.c44
-rw-r--r--drivers/net/ixgbe/ixgbe_regs.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.c293
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_common.h13
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c24
-rw-r--r--drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c102
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.c910
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe.h245
-rw-r--r--drivers/net/ixgbe/rte_pmd_ixgbe_version.map23
-rw-r--r--drivers/net/kni/Makefile56
-rw-r--r--drivers/net/kni/rte_eth_kni.c510
-rw-r--r--drivers/net/kni/rte_pmd_kni_version.map4
-rw-r--r--drivers/net/liquidio/Makefile58
-rw-r--r--drivers/net/liquidio/base/lio_23xx_reg.h194
-rw-r--r--drivers/net/liquidio/base/lio_23xx_vf.c588
-rw-r--r--drivers/net/liquidio/base/lio_23xx_vf.h97
-rw-r--r--drivers/net/liquidio/base/lio_hw_defs.h249
-rw-r--r--drivers/net/liquidio/base/lio_mbox.c275
-rw-r--r--drivers/net/liquidio/base/lio_mbox.h131
-rw-r--r--drivers/net/liquidio/lio_ethdev.c2058
-rw-r--r--drivers/net/liquidio/lio_ethdev.h205
-rw-r--r--drivers/net/liquidio/lio_logs.h91
-rw-r--r--drivers/net/liquidio/lio_rxtx.c1885
-rw-r--r--drivers/net/liquidio/lio_rxtx.h769
-rw-r--r--drivers/net/liquidio/lio_struct.h689
-rw-r--r--drivers/net/liquidio/rte_pmd_lio_version.map4
-rw-r--r--drivers/net/mlx4/Makefile11
-rw-r--r--drivers/net/mlx4/mlx4.c772
-rw-r--r--drivers/net/mlx4/mlx4.h198
-rw-r--r--drivers/net/mlx4/mlx4_flow.c1090
-rw-r--r--drivers/net/mlx4/mlx4_flow.h102
-rw-r--r--drivers/net/mlx5/Makefile15
-rw-r--r--drivers/net/mlx5/mlx5.c223
-rw-r--r--drivers/net/mlx5/mlx5.h57
-rw-r--r--drivers/net/mlx5/mlx5_defs.h14
-rw-r--r--drivers/net/mlx5/mlx5_ethdev.c126
-rw-r--r--drivers/net/mlx5/mlx5_fdir.c15
-rw-r--r--drivers/net/mlx5/mlx5_flow.c1586
-rw-r--r--drivers/net/mlx5/mlx5_mac.c16
-rw-r--r--drivers/net/mlx5/mlx5_prm.h124
-rw-r--r--drivers/net/mlx5/mlx5_rss.c18
-rw-r--r--drivers/net/mlx5/mlx5_rxq.c239
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.c1097
-rw-r--r--drivers/net/mlx5/mlx5_rxtx.h30
-rw-r--r--drivers/net/mlx5/mlx5_stats.c365
-rw-r--r--drivers/net/mlx5/mlx5_trigger.c32
-rw-r--r--drivers/net/mlx5/mlx5_txq.c162
-rw-r--r--drivers/net/mpipe/Makefile47
-rw-r--r--drivers/net/mpipe/mpipe_tilegx.c1655
-rw-r--r--drivers/net/nfp/Makefile5
-rw-r--r--drivers/net/nfp/nfp_net.c471
-rw-r--r--drivers/net/nfp/nfp_net_ctrl.h12
-rw-r--r--drivers/net/nfp/nfp_net_pmd.h16
-rw-r--r--drivers/net/null/Makefile14
-rw-r--r--drivers/net/null/rte_eth_null.c77
-rw-r--r--drivers/net/null/rte_pmd_null_version.map7
-rw-r--r--drivers/net/pcap/Makefile7
-rw-r--r--drivers/net/pcap/rte_eth_pcap.c81
-rw-r--r--drivers/net/qede/Makefile41
-rw-r--r--drivers/net/qede/base/bcm_osal.c4
-rw-r--r--drivers/net/qede/base/bcm_osal.h40
-rw-r--r--drivers/net/qede/base/common_hsi.h196
-rw-r--r--drivers/net/qede/base/ecore.h193
-rw-r--r--drivers/net/qede/base/ecore_chain.h142
-rw-r--r--drivers/net/qede/base/ecore_cxt.c409
-rw-r--r--drivers/net/qede/base/ecore_cxt.h68
-rw-r--r--drivers/net/qede/base/ecore_cxt_api.h24
-rw-r--r--drivers/net/qede/base/ecore_dcbx.c456
-rw-r--r--drivers/net/qede/base/ecore_dcbx.h10
-rw-r--r--drivers/net/qede/base/ecore_dcbx_api.h5
-rw-r--r--drivers/net/qede/base/ecore_dev.c2342
-rw-r--r--drivers/net/qede/base/ecore_dev_api.h127
-rw-r--r--drivers/net/qede/base/ecore_gtt_reg_addr.h20
-rw-r--r--drivers/net/qede/base/ecore_hsi_common.h911
-rw-r--r--drivers/net/qede/base/ecore_hsi_debug_tools.h221
-rw-r--r--drivers/net/qede/base/ecore_hsi_eth.h2079
-rw-r--r--drivers/net/qede/base/ecore_hsi_init_tool.h90
-rw-r--r--drivers/net/qede/base/ecore_hw.c55
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.c1454
-rw-r--r--drivers/net/qede/base/ecore_init_fw_funcs.h172
-rw-r--r--drivers/net/qede/base/ecore_init_ops.c26
-rw-r--r--drivers/net/qede/base/ecore_int.c171
-rw-r--r--drivers/net/qede/base/ecore_int.h10
-rw-r--r--drivers/net/qede/base/ecore_int_api.h51
-rw-r--r--drivers/net/qede/base/ecore_iov_api.h69
-rw-r--r--drivers/net/qede/base/ecore_iro.h8
-rw-r--r--drivers/net/qede/base/ecore_iro_values.h32
-rw-r--r--drivers/net/qede/base/ecore_l2.c892
-rw-r--r--drivers/net/qede/base/ecore_l2.h149
-rw-r--r--drivers/net/qede/base/ecore_l2_api.h137
-rw-r--r--drivers/net/qede/base/ecore_mcp.c1099
-rw-r--r--drivers/net/qede/base/ecore_mcp.h192
-rw-r--r--drivers/net/qede/base/ecore_mcp_api.h349
-rw-r--r--drivers/net/qede/base/ecore_mng_tlv.c1535
-rw-r--r--drivers/net/qede/base/ecore_proto_if.h16
-rw-r--r--drivers/net/qede/base/ecore_rt_defs.h623
-rw-r--r--drivers/net/qede/base/ecore_sp_api.h19
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.c376
-rw-r--r--drivers/net/qede/base/ecore_sp_commands.h23
-rw-r--r--drivers/net/qede/base/ecore_spq.c108
-rw-r--r--drivers/net/qede/base/ecore_spq.h36
-rw-r--r--drivers/net/qede/base/ecore_sriov.c1221
-rw-r--r--drivers/net/qede/base/ecore_sriov.h28
-rw-r--r--drivers/net/qede/base/ecore_status.h1
-rw-r--r--drivers/net/qede/base/ecore_utils.h6
-rw-r--r--drivers/net/qede/base/ecore_vf.c418
-rw-r--r--drivers/net/qede/base/ecore_vf.h85
-rw-r--r--drivers/net/qede/base/ecore_vf_api.h21
-rw-r--r--drivers/net/qede/base/ecore_vfpf_if.h55
-rw-r--r--drivers/net/qede/base/eth_common.h36
-rw-r--r--drivers/net/qede/base/mcp_public.h377
-rw-r--r--drivers/net/qede/base/nvm_cfg.h541
-rw-r--r--drivers/net/qede/base/reg_addr.h59
-rw-r--r--drivers/net/qede/qede_eth_if.c107
-rw-r--r--drivers/net/qede/qede_eth_if.h35
-rw-r--r--drivers/net/qede/qede_ethdev.c1137
-rw-r--r--drivers/net/qede/qede_ethdev.h134
-rw-r--r--drivers/net/qede/qede_fdir.c469
-rw-r--r--drivers/net/qede/qede_if.h80
-rw-r--r--drivers/net/qede/qede_logs.h16
-rw-r--r--drivers/net/qede/qede_main.c181
-rw-r--r--drivers/net/qede/qede_rxtx.c1298
-rw-r--r--drivers/net/qede/qede_rxtx.h102
-rw-r--r--drivers/net/ring/Makefile5
-rw-r--r--drivers/net/ring/rte_eth_ring.c26
-rw-r--r--drivers/net/sfc/Makefile143
-rw-r--r--drivers/net/sfc/base/README36
-rw-r--r--drivers/net/sfc/base/ef10_ev.c1401
-rw-r--r--drivers/net/sfc/base/ef10_filter.c1501
-rw-r--r--drivers/net/sfc/base/ef10_impl.h1183
-rw-r--r--drivers/net/sfc/base/ef10_intr.c197
-rw-r--r--drivers/net/sfc/base/ef10_mac.c897
-rw-r--r--drivers/net/sfc/base/ef10_mcdi.c342
-rw-r--r--drivers/net/sfc/base/ef10_nic.c1780
-rw-r--r--drivers/net/sfc/base/ef10_nvram.c2385
-rw-r--r--drivers/net/sfc/base/ef10_phy.c631
-rw-r--r--drivers/net/sfc/base/ef10_rx.c965
-rw-r--r--drivers/net/sfc/base/ef10_tlv_layout.h941
-rw-r--r--drivers/net/sfc/base/ef10_tx.c710
-rw-r--r--drivers/net/sfc/base/ef10_vpd.c463
-rw-r--r--drivers/net/sfc/base/efx.h2535
-rw-r--r--drivers/net/sfc/base/efx_bootcfg.c563
-rw-r--r--drivers/net/sfc/base/efx_check.h346
-rw-r--r--drivers/net/sfc/base/efx_crc32.c122
-rw-r--r--drivers/net/sfc/base/efx_ev.c1470
-rw-r--r--drivers/net/sfc/base/efx_filter.c1424
-rw-r--r--drivers/net/sfc/base/efx_hash.c328
-rw-r--r--drivers/net/sfc/base/efx_impl.h1208
-rw-r--r--drivers/net/sfc/base/efx_intr.c572
-rw-r--r--drivers/net/sfc/base/efx_lic.c1751
-rw-r--r--drivers/net/sfc/base/efx_mac.c951
-rw-r--r--drivers/net/sfc/base/efx_mcdi.c2346
-rw-r--r--drivers/net/sfc/base/efx_mcdi.h415
-rw-r--r--drivers/net/sfc/base/efx_mon.c255
-rw-r--r--drivers/net/sfc/base/efx_nic.c1110
-rw-r--r--drivers/net/sfc/base/efx_nvram.c1044
-rw-r--r--drivers/net/sfc/base/efx_phy.c561
-rw-r--r--drivers/net/sfc/base/efx_phy_ids.h51
-rw-r--r--drivers/net/sfc/base/efx_port.c252
-rw-r--r--drivers/net/sfc/base/efx_regs.h3870
-rw-r--r--drivers/net/sfc/base/efx_regs_ef10.h571
-rw-r--r--drivers/net/sfc/base/efx_regs_mcdi.h15690
-rw-r--r--drivers/net/sfc/base/efx_regs_pci.h2356
-rw-r--r--drivers/net/sfc/base/efx_rx.c1315
-rw-r--r--drivers/net/sfc/base/efx_sram.c331
-rw-r--r--drivers/net/sfc/base/efx_tx.c1097
-rw-r--r--drivers/net/sfc/base/efx_types.h1647
-rw-r--r--drivers/net/sfc/base/efx_vpd.c1016
-rw-r--r--drivers/net/sfc/base/hunt_impl.h74
-rw-r--r--drivers/net/sfc/base/hunt_nic.c402
-rw-r--r--drivers/net/sfc/base/mcdi_mon.c565
-rw-r--r--drivers/net/sfc/base/mcdi_mon.h74
-rw-r--r--drivers/net/sfc/base/medford_impl.h67
-rw-r--r--drivers/net/sfc/base/medford_nic.c402
-rw-r--r--drivers/net/sfc/base/siena_flash.h215
-rw-r--r--drivers/net/sfc/base/siena_impl.h431
-rw-r--r--drivers/net/sfc/base/siena_mac.c476
-rw-r--r--drivers/net/sfc/base/siena_mcdi.c263
-rw-r--r--drivers/net/sfc/base/siena_nic.c585
-rw-r--r--drivers/net/sfc/base/siena_nvram.c734
-rw-r--r--drivers/net/sfc/base/siena_phy.c797
-rw-r--r--drivers/net/sfc/base/siena_sram.c178
-rw-r--r--drivers/net/sfc/base/siena_vpd.c618
-rw-r--r--drivers/net/sfc/efsys.h780
-rw-r--r--drivers/net/sfc/rte_pmd_sfc_efx_version.map4
-rw-r--r--drivers/net/sfc/sfc.c750
-rw-r--r--drivers/net/sfc/sfc.h322
-rw-r--r--drivers/net/sfc/sfc_debug.h59
-rw-r--r--drivers/net/sfc/sfc_dp.c100
-rw-r--r--drivers/net/sfc/sfc_dp.h125
-rw-r--r--drivers/net/sfc/sfc_dp_rx.h197
-rw-r--r--drivers/net/sfc/sfc_dp_tx.h170
-rw-r--r--drivers/net/sfc/sfc_ef10.h107
-rw-r--r--drivers/net/sfc/sfc_ef10_rx.c712
-rw-r--r--drivers/net/sfc/sfc_ef10_tx.c560
-rw-r--r--drivers/net/sfc/sfc_ethdev.c1642
-rw-r--r--drivers/net/sfc/sfc_ev.c921
-rw-r--r--drivers/net/sfc/sfc_ev.h129
-rw-r--r--drivers/net/sfc/sfc_filter.c137
-rw-r--r--drivers/net/sfc/sfc_filter.h62
-rw-r--r--drivers/net/sfc/sfc_flow.c1175
-rw-r--r--drivers/net/sfc/sfc_flow.h64
-rw-r--r--drivers/net/sfc/sfc_intr.c342
-rw-r--r--drivers/net/sfc/sfc_kvargs.c145
-rw-r--r--drivers/net/sfc/sfc_kvargs.h93
-rw-r--r--drivers/net/sfc/sfc_log.h76
-rw-r--r--drivers/net/sfc/sfc_mcdi.c331
-rw-r--r--drivers/net/sfc/sfc_port.c475
-rw-r--r--drivers/net/sfc/sfc_rx.c1327
-rw-r--r--drivers/net/sfc/sfc_rx.h180
-rw-r--r--drivers/net/sfc/sfc_tso.c201
-rw-r--r--drivers/net/sfc/sfc_tweak.h56
-rw-r--r--drivers/net/sfc/sfc_tx.c992
-rw-r--r--drivers/net/sfc/sfc_tx.h164
-rw-r--r--drivers/net/szedata2/Makefile7
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.c104
-rw-r--r--drivers/net/szedata2/rte_eth_szedata2.h60
-rw-r--r--drivers/net/tap/Makefile93
-rw-r--r--drivers/net/tap/rte_eth_tap.c1394
-rw-r--r--drivers/net/tap/rte_eth_tap.h100
-rw-r--r--drivers/net/tap/rte_pmd_tap_version.map4
-rw-r--r--drivers/net/tap/tap_flow.c1507
-rw-r--r--drivers/net/tap/tap_flow.h82
-rw-r--r--drivers/net/tap/tap_netlink.c367
-rw-r--r--drivers/net/tap/tap_netlink.h69
-rw-r--r--drivers/net/tap/tap_tcmsgs.c323
-rw-r--r--drivers/net/tap/tap_tcmsgs.h (renamed from drivers/net/null/rte_eth_null.h)37
-rw-r--r--drivers/net/thunderx/Makefile4
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.c12
-rw-r--r--drivers/net/thunderx/base/nicvf_bsvf.h2
-rw-r--r--drivers/net/thunderx/base/nicvf_hw_defs.h58
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.c9
-rw-r--r--drivers/net/thunderx/base/nicvf_mbox.h11
-rw-r--r--drivers/net/thunderx/base/nicvf_plat.h40
-rw-r--r--drivers/net/thunderx/nicvf_ethdev.c99
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.c46
-rw-r--r--drivers/net/thunderx/nicvf_rxtx.h27
-rw-r--r--drivers/net/thunderx/nicvf_struct.h23
-rw-r--r--drivers/net/vhost/Makefile8
-rw-r--r--drivers/net/vhost/rte_eth_vhost.c314
-rw-r--r--drivers/net/vhost/rte_eth_vhost.h32
-rw-r--r--drivers/net/vhost/rte_pmd_vhost_version.map3
-rw-r--r--drivers/net/virtio/Makefile8
-rw-r--r--drivers/net/virtio/virtio_ethdev.c364
-rw-r--r--drivers/net/virtio/virtio_ethdev.h8
-rw-r--r--drivers/net/virtio/virtio_pci.c251
-rw-r--r--drivers/net/virtio/virtio_pci.h16
-rw-r--r--drivers/net/virtio/virtio_rxtx.c41
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple.c5
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple.h6
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple_neon.c6
-rw-r--r--drivers/net/virtio/virtio_rxtx_simple_sse.c6
-rw-r--r--drivers/net/virtio/virtio_user/vhost.h51
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel.c403
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.c133
-rw-r--r--drivers/net/virtio/virtio_user/vhost_kernel_tap.h67
-rw-r--r--drivers/net/virtio/virtio_user/vhost_user.c98
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.c246
-rw-r--r--drivers/net/virtio/virtio_user/virtio_user_dev.h20
-rw-r--r--drivers/net/virtio/virtio_user_ethdev.c175
-rw-r--r--drivers/net/virtio/virtqueue.c11
-rw-r--r--drivers/net/virtio/virtqueue.h24
-rw-r--r--drivers/net/vmxnet3/Makefile5
-rw-r--r--drivers/net/vmxnet3/base/vmxnet3_defs.h85
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.c203
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethdev.h38
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ring.h16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_rxtx.c148
-rw-r--r--drivers/net/xenvirt/Makefile6
-rw-r--r--drivers/net/xenvirt/rte_eth_xenvirt.c19
450 files changed, 145354 insertions, 14170 deletions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index bc932309..35ed8135 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -31,33 +31,81 @@
include $(RTE_SDK)/mk/rte.vars.mk
+# set in mk/toolchain/xxx/rte.toolchain-compat.mk
+ifeq ($(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD),d)
+ $(warning thunderx pmd is not supported by old compilers)
+endif
+
+core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_ether
+core-libs += librte_net librte_kvargs
+
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+DEPDIRS-af_packet = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DEPDIRS-ark = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
+DEPDIRS-avp = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
+DEPDIRS-bnx2x = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
+DEPDIRS-bonding = $(core-libs) librte_cmdline
DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+DEPDIRS-cxgbe = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2
+DEPDIRS-dpaa2 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += e1000
+DEPDIRS-e1000 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena
+DEPDIRS-ena = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic
+DEPDIRS-enic = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k
+DEPDIRS-fm10k = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e
+DEPDIRS-i40e = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe
+DEPDIRS-ixgbe = $(core-libs) librte_hash
+DIRS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += liquidio
+DEPDIRS-liquidio = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4
+DEPDIRS-mlx4 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5
-DIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe
+DEPDIRS-mlx5 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp
+DEPDIRS-nfp = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt
+DEPDIRS-bnxt = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null
+DEPDIRS-null = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap
+DEPDIRS-pcap = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede
+DEPDIRS-qede = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring
+DEPDIRS-ring = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc
+DEPDIRS-sfc = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2
+DEPDIRS-szedata2 = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap
+DEPDIRS-tap = $(core-libs) librte_hash
DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx
+DEPDIRS-thunderx = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio
+DEPDIRS-virtio = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3
+DEPDIRS-vmxnet3 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt
+DEPDIRS-xenvirt = $(core-libs) librte_cmdline
+
+ifeq ($(CONFIG_RTE_LIBRTE_KNI),y)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += kni
+endif
+DEPDIRS-kni = $(core-libs) librte_kni
ifeq ($(CONFIG_RTE_LIBRTE_VHOST),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += vhost
endif # $(CONFIG_RTE_LIBRTE_VHOST)
+DEPDIRS-vhost = $(core-libs) librte_vhost
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile
index e14d6d0c..70d517c1 100644
--- a/drivers/net/af_packet/Makefile
+++ b/drivers/net/af_packet/Makefile
@@ -50,11 +50,4 @@ CFLAGS += $(WERROR_FLAGS)
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 45c6519f..68de45c3 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -38,6 +38,7 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>
@@ -83,6 +84,7 @@ struct pkt_rx_queue {
struct pkt_tx_queue {
int sockfd;
+ unsigned int frame_data_size;
struct iovec *rd;
uint8_t *map;
@@ -98,6 +100,7 @@ struct pmd_internals {
unsigned nb_queues;
int if_index;
+ char *if_name;
struct ether_addr eth_addr;
struct tpacket_req req;
@@ -115,8 +118,6 @@ static const char *valid_arguments[] = {
NULL
};
-static const char *drivername = "AF_PACKET PMD";
-
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -161,6 +162,12 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
pbuf = (uint8_t *) ppd + ppd->tp_mac;
memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));
+ /* check for vlan info */
+ if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
+ mbuf->vlan_tci = ppd->tp_vlan_tci;
+ mbuf->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+ }
+
/* release incoming frame and advance ring buffer */
ppd->tp_status = TP_STATUS_KERNEL;
if (++framenum >= framecount)
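
The hunk above propagates the VLAN tag that the kernel strips from frames received through the TPACKET_V2 ring into the mbuf. As an illustrative, standalone sketch of the same check (not part of the patch), using only linux/if_packet.h:

/* Illustrative sketch: read the stripped VLAN tag from a TPACKET_V2
 * frame header when the kernel marks it as valid. */
#include <linux/if_packet.h>
#include <stdint.h>

static uint16_t frame_vlan_tci(const struct tpacket2_hdr *hdr)
{
	if (hdr->tp_status & TP_STATUS_VLAN_VALID)
		return hdr->tp_vlan_tci;	/* tag removed from the frame data */
	return 0;				/* untagged frame */
}
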
@@ -206,13 +213,28 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
framenum = pkt_q->framenum;
ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
for (i = 0; i < nb_pkts; i++) {
+ mbuf = *bufs++;
+
+ /* drop oversized packets */
+ if (rte_pktmbuf_data_len(mbuf) > pkt_q->frame_data_size) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+
+ /* insert vlan info if necessary */
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ if (rte_vlan_insert(&mbuf)) {
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ }
+
/* point at the next incoming frame */
if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
(poll(&pfd, 1, -1) < 0))
- continue;
+ break;
/* copy the tx frame data */
- mbuf = bufs[num_tx];
pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
sizeof(struct sockaddr_ll);
memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf));
@@ -231,13 +253,13 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
/* kick-off transmits */
if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
- return 0; /* error sending -- no packets transmitted */
+ num_tx = 0; /* error sending -- no packets transmitted */
pkt_q->framenum = framenum;
pkt_q->tx_pkts += num_tx;
- pkt_q->err_pkts += nb_pkts - num_tx;
+ pkt_q->err_pkts += i - num_tx;
pkt_q->tx_bytes += num_tx_bytes;
- return num_tx;
+ return i;
}
static int
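
The rewritten transmit loop above changes the burst contract: oversized mbufs are freed and counted as errors, a failed poll() stops the loop early, and the function returns i, the number of mbufs consumed (sent or dropped), rather than only the number queued. A hedged sketch of that accounting, with a hypothetical send_one() backend standing in for the ring copy:

/* Illustrative sketch of the burst-TX accounting after this change.
 * send_one() is a hypothetical backend; the return value is "buffers
 * consumed from the caller", not "buffers transmitted". */
#include <stdlib.h>

struct buf { size_t len; };

static int send_one(const struct buf *b) { (void)b; return 0; } /* stub */

static unsigned int
tx_burst(struct buf **bufs, unsigned int nb, size_t max_len,
	 unsigned int *sent, unsigned int *errors)
{
	unsigned int i;

	*sent = 0;
	for (i = 0; i < nb; i++) {
		struct buf *b = bufs[i];

		if (b->len > max_len) {		/* drop oversized buffers */
			free(b);
			continue;
		}
		if (send_one(b) < 0)		/* backend stalled: stop early */
			break;
		free(b);
		(*sent)++;
	}
	*errors = i - *sent;			/* consumed but not transmitted */
	return i;
}
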
@@ -287,14 +309,12 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -377,18 +397,20 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
- uint16_t buf_size;
+ unsigned int buf_size, data_size;
pkt_q->mb_pool = mb_pool;
/* Now get the space available for data in the mbuf */
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ data_size = internals->req.tp_frame_size;
+ data_size -= TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
- if (ETH_FRAME_LEN > buf_size) {
+ if (data_size > buf_size) {
RTE_LOG(ERR, PMD,
"%s: %d bytes will not fit in mbuf (%d bytes)\n",
- dev->data->name, ETH_FRAME_LEN, buf_size);
+ dev->data->name, data_size, buf_size);
return -ENOMEM;
}
@@ -412,12 +434,80 @@ eth_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
}
+static int
+eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_mtu = mtu };
+ int ret;
+ int s;
+ unsigned int data_size = internals->req.tp_frame_size -
+ TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
+
+ if (mtu > data_size)
+ return -EINVAL;
+
+ s = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return -EINVAL;
+
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", internals->if_name);
+ ret = ioctl(s, SIOCSIFMTU, &ifr);
+ close(s);
+
+ if (ret < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
+{
+ struct ifreq ifr;
+ int s;
+
+ s = socket(PF_INET, SOCK_DGRAM, 0);
+ if (s < 0)
+ return;
+
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
+ if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0)
+ goto out;
+ ifr.ifr_flags &= mask;
+ ifr.ifr_flags |= flags;
+ if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0)
+ goto out;
+out:
+ close(s);
+}
+
+static void
+eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
+}
+
+static void
+eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
+}
+
static const struct eth_dev_ops ops = {
.dev_start = eth_dev_start,
.dev_stop = eth_dev_stop,
.dev_close = eth_dev_close,
.dev_configure = eth_dev_configure,
.dev_infos_get = eth_dev_info,
+ .mtu_set = eth_dev_mtu_set,
+ .promiscuous_enable = eth_dev_promiscuous_enable,
+ .promiscuous_disable = eth_dev_promiscuous_disable,
.rx_queue_setup = eth_rx_queue_setup,
.tx_queue_setup = eth_tx_queue_setup,
.rx_queue_release = eth_queue_release,
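
The mtu_set and promiscuous-mode callbacks added above drive the underlying kernel interface through the standard SIOCSIFMTU and SIOCGIFFLAGS/SIOCSIFFLAGS ioctls. A self-contained sketch of the MTU half of that pattern, assuming a Linux host and an interface name supplied by the caller:

/* Illustrative sketch: set an interface MTU with SIOCSIFMTU, the same
 * kernel interface eth_dev_mtu_set() above relies on. */
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int set_if_mtu(const char *if_name, int mtu)
{
	struct ifreq ifr;
	int s, ret;

	s = socket(PF_INET, SOCK_DGRAM, 0);
	if (s < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, IFNAMSIZ, "%s", if_name);
	ifr.ifr_mtu = mtu;

	ret = ioctl(s, SIOCSIFMTU, &ifr);
	close(s);
	return ret < 0 ? -1 : 0;
}
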
@@ -447,19 +537,22 @@ open_packet_iface(const char *key __rte_unused,
return 0;
}
+static struct rte_vdev_driver pmd_af_packet_drv;
+
static int
-rte_pmd_init_internals(const char *name,
+rte_pmd_init_internals(struct rte_vdev_device *dev,
const int sockfd,
const unsigned nb_queues,
unsigned int blocksize,
unsigned int blockcnt,
unsigned int framesize,
unsigned int framecnt,
- const unsigned numa_node,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev,
struct rte_kvargs *kvlist)
{
+ const char *name = rte_vdev_device_name(dev);
+ const unsigned int numa_node = dev->device.numa_node;
struct rte_eth_dev_data *data = NULL;
struct rte_kvargs_pair *pair = NULL;
struct ifreq ifr;
@@ -531,6 +624,7 @@ rte_pmd_init_internals(const char *name,
name);
goto error_early;
}
+ (*internals)->if_name = strdup(pair->value);
(*internals)->if_index = ifr.ifr_ifindex;
if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
@@ -640,6 +734,9 @@ rte_pmd_init_internals(const char *name,
tx_queue = &((*internals)->tx_queue[q]);
tx_queue->framecount = req->tp_frame_nr;
+ tx_queue->frame_data_size = req->tp_frame_size;
+ tx_queue->frame_data_size -= TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll);
tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;
@@ -673,7 +770,7 @@ rte_pmd_init_internals(const char *name,
}
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name);
+ *eth_dev = rte_eth_vdev_allocate(dev, 0);
if (*eth_dev == NULL)
goto error;
@@ -687,22 +784,16 @@ rte_pmd_init_internals(const char *name,
(*internals)->nb_queues = nb_queues;
+ rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
data->dev_private = *internals;
- data->port_id = (*eth_dev)->data->port_id;
data->nb_rx_queues = (uint16_t)nb_queues;
data->nb_tx_queues = (uint16_t)nb_queues;
data->dev_link = pmd_link;
data->mac_addrs = &(*internals)->eth_addr;
- strncpy(data->name,
- (*eth_dev)->data->name, strlen((*eth_dev)->data->name));
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->driver = NULL;
(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- (*eth_dev)->data->drv_name = drivername;
- (*eth_dev)->data->kdrv = RTE_KDRV_NONE;
- (*eth_dev)->data->numa_node = numa_node;
return 0;
@@ -719,6 +810,7 @@ error:
((*internals)->rx_queue[q].sockfd != qsockfd))
close((*internals)->rx_queue[q].sockfd);
}
+ free((*internals)->if_name);
rte_free(*internals);
error_early:
rte_free(data);
@@ -726,11 +818,11 @@ error_early:
}
static int
-rte_eth_from_packet(const char *name,
+rte_eth_from_packet(struct rte_vdev_device *dev,
int const *sockfd,
- const unsigned numa_node,
struct rte_kvargs *kvlist)
{
+ const char *name = rte_vdev_device_name(dev);
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
struct rte_kvargs_pair *pair = NULL;
@@ -813,11 +905,11 @@ rte_eth_from_packet(const char *name,
RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);
- if (rte_pmd_init_internals(name, *sockfd, qpairs,
- blocksize, blockcount,
- framesize, framecount,
- numa_node, &internals, &eth_dev,
- kvlist) < 0)
+ if (rte_pmd_init_internals(dev, *sockfd, qpairs,
+ blocksize, blockcount,
+ framesize, framecount,
+ &internals, &eth_dev,
+ kvlist) < 0)
return -1;
eth_dev->rx_pkt_burst = eth_af_packet_rx;
@@ -827,18 +919,16 @@ rte_eth_from_packet(const char *name,
}
static int
-rte_pmd_af_packet_probe(const char *name, const char *params)
+rte_pmd_af_packet_probe(struct rte_vdev_device *dev)
{
- unsigned numa_node;
int ret = 0;
struct rte_kvargs *kvlist;
int sockfd = -1;
- RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", name);
+ RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n",
+ rte_vdev_device_name(dev));
- numa_node = rte_socket_id();
-
- kvlist = rte_kvargs_parse(params, valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL) {
ret = -1;
goto exit;
@@ -856,7 +946,10 @@ rte_pmd_af_packet_probe(const char *name, const char *params)
goto exit;
}
- ret = rte_eth_from_packet(name, &sockfd, numa_node, kvlist);
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ ret = rte_eth_from_packet(dev, &sockfd, kvlist);
close(sockfd); /* no longer needed */
exit:
@@ -865,7 +958,7 @@ exit:
}
static int
-rte_pmd_af_packet_remove(const char *name)
+rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals;
@@ -874,11 +967,11 @@ rte_pmd_af_packet_remove(const char *name)
RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
rte_socket_id());
- if (name == NULL)
+ if (dev == NULL)
return -1;
/* find the ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
return -1;
@@ -887,6 +980,7 @@ rte_pmd_af_packet_remove(const char *name)
rte_free(internals->rx_queue[q].rd);
rte_free(internals->tx_queue[q].rd);
}
+ free(internals->if_name);
rte_free(eth_dev->data->dev_private);
rte_free(eth_dev->data);
diff --git a/drivers/net/ark/Makefile b/drivers/net/ark/Makefile
new file mode 100644
index 00000000..ca64b195
--- /dev/null
+++ b/drivers/net/ark/Makefile
@@ -0,0 +1,66 @@
+# BSD LICENSE
+#
+# Copyright (c) 2015-2017 Atomic Rules LLC
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ark.a
+
+CFLAGS += -O3 -I./
+CFLAGS += $(WERROR_FLAGS) -Werror
+
+EXPORT_MAP := rte_pmd_ark_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ddm.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_ethdev_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_mpu.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktchkr.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_pktgen.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_rqp.c
+SRCS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark_udm.c
+
+# this lib depends upon:
+LDLIBS += -lpthread
+ifdef CONFIG_RTE_EXEC_ENV_LINUXAPP
+LDLIBS += -ldl
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ark/ark_ddm.c b/drivers/net/ark/ark_ddm.c
new file mode 100644
index 00000000..221460c7
--- /dev/null
+++ b/drivers/net/ark/ark_ddm.c
@@ -0,0 +1,151 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_ddm.h"
+
+/* ************************************************************************* */
+int
+ark_ddm_verify(struct ark_ddm_t *ddm)
+{
+ if (sizeof(struct ark_ddm_t) != ARK_DDM_EXPECTED_SIZE) {
+ PMD_DRV_LOG(ERR, "ARK: DDM structure looks incorrect %d vs %zd\n",
+ ARK_DDM_EXPECTED_SIZE, sizeof(struct ark_ddm_t));
+ return -1;
+ }
+
+ if (ddm->cfg.const0 != ARK_DDM_CONST) {
+ PMD_DRV_LOG(ERR, "ARK: DDM module not found as expected 0x%08x\n",
+ ddm->cfg.const0);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_ddm_start(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.command = 1;
+}
+
+int
+ark_ddm_stop(struct ark_ddm_t *ddm, const int wait)
+{
+ int cnt = 0;
+
+ ddm->cfg.command = 2;
+ while (wait && (ddm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+void
+ark_ddm_reset(struct ark_ddm_t *ddm)
+{
+ int status;
+
+ /* reset only works if ddm has stopped properly. */
+ status = ark_ddm_stop(ddm, 1);
+
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
+ ddm->cfg.command = 4;
+ usleep(10);
+ }
+ ddm->cfg.command = 3;
+}
+
+void
+ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr, uint32_t interval)
+{
+ ddm->setup.cons_write_index_addr = cons_addr;
+ ddm->setup.write_index_interval = interval / 4; /* 4 ns period */
+}
+
+void
+ark_ddm_stats_reset(struct ark_ddm_t *ddm)
+{
+ ddm->cfg.tlp_stats_clear = 1;
+}
+
+void
+ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg)
+{
+ PMD_FUNC_LOG(DEBUG, "%s Stopped: %d\n", msg,
+ ark_ddm_is_stopped(ddm)
+ );
+}
+
+void
+ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg)
+{
+ struct ark_ddm_stats_t *stats = &ddm->stats;
+
+ PMD_STATS_LOG(INFO, "DDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64
+ "\n", msg,
+ "Bytes:", stats->tx_byte_count,
+ "Packets:", stats->tx_pkt_count,
+ "MBufs", stats->tx_mbuf_count);
+}
+
+int
+ark_ddm_is_stopped(struct ark_ddm_t *ddm)
+{
+ return (ddm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_ddm_queue_byte_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.byte_count;
+}
+
+uint64_t
+ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm)
+{
+ return ddm->queue_stats.pkt_count;
+}
+
+void
+ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm)
+{
+ ddm->queue_stats.byte_count = 1;
+}
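
ark_ddm_stop() above implements a bounded busy-wait: write the stop command, then poll the stop_flushed flag every 10 microseconds and give up after roughly 1000 iterations, at which point ark_ddm_reset() falls back to a forced reset. A generic sketch of that timeout pattern, independent of the Arkville registers:

/* Illustrative sketch: poll a hardware "done" predicate with a bounded
 * number of short sleeps, mirroring the ark_ddm_stop() loop above. */
#include <unistd.h>

static int wait_until(int (*done)(void *), void *arg,
		      unsigned int max_iter, unsigned int delay_us)
{
	unsigned int i;

	for (i = 0; i < max_iter; i++) {
		if (done(arg))
			return 0;	/* condition met */
		usleep(delay_us);
	}
	return -1;			/* timed out */
}

With max_iter = 1000 and delay_us = 10 this allows about the same 10 ms budget the DDM stop path uses before the driver resorts to a forced reset.
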
diff --git a/drivers/net/ark/ark_ddm.h b/drivers/net/ark/ark_ddm.h
new file mode 100644
index 00000000..de61926c
--- /dev/null
+++ b/drivers/net/ark/ark_ddm.h
@@ -0,0 +1,177 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_DDM_H_
+#define _ARK_DDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+
+/* The DDM or Downstream Data Mover is an internal Arkville hardware
+ * module for moving packet from host memory to the TX packet streams.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* struct defining Tx meta data -- fixed in FPGA -- 16 bytes */
+struct ark_tx_meta {
+ uint64_t physaddr;
+ uint32_t delta_ns;
+ uint16_t data_len; /* of this MBUF */
+#define ARK_DDM_EOP 0x01
+#define ARK_DDM_SOP 0x02
+ uint8_t flags; /* bit 0 indicates last mbuf in chain. */
+ uint8_t reserved[1];
+};
+
+
+/*
+ * DDM core hardware structures
+ * These are overlay structures to a memory mapped FPGA device. These
+ * structs will never be instantiated in ram memory
+ */
+#define ARK_DDM_CFG 0x0000
+#define ARK_DDM_CONST 0xfacecafe
+struct ark_ddm_cfg_t {
+ uint32_t r0;
+ volatile uint32_t tlp_stats_clear;
+ uint32_t const0;
+ volatile uint32_t tag_max;
+ volatile uint32_t command;
+ volatile uint32_t stop_flushed;
+};
+
+#define ARK_DDM_STATS 0x0020
+struct ark_ddm_stats_t {
+ volatile uint64_t tx_byte_count;
+ volatile uint64_t tx_pkt_count;
+ volatile uint64_t tx_mbuf_count;
+};
+
+#define ARK_DDM_MRDQ 0x0040
+struct ark_ddm_mrdq_t {
+ volatile uint32_t mrd_q1;
+ volatile uint32_t mrd_q2;
+ volatile uint32_t mrd_q3;
+ volatile uint32_t mrd_q4;
+ volatile uint32_t mrd_full;
+};
+
+#define ARK_DDM_CPLDQ 0x0068
+struct ark_ddm_cpldq_t {
+ volatile uint32_t cpld_q1;
+ volatile uint32_t cpld_q2;
+ volatile uint32_t cpld_q3;
+ volatile uint32_t cpld_q4;
+ volatile uint32_t cpld_full;
+};
+
+#define ARK_DDM_MRD_PS 0x0090
+struct ark_ddm_mrd_ps_t {
+ volatile uint32_t mrd_ps_min;
+ volatile uint32_t mrd_ps_max;
+ volatile uint32_t mrd_full_ps_min;
+ volatile uint32_t mrd_full_ps_max;
+ volatile uint32_t mrd_dw_ps_min;
+ volatile uint32_t mrd_dw_ps_max;
+};
+
+#define ARK_DDM_QUEUE_STATS 0x00a8
+struct ark_ddm_qstats_t {
+ volatile uint64_t byte_count;
+ volatile uint64_t pkt_count;
+ volatile uint64_t mbuf_count;
+};
+
+#define ARK_DDM_CPLD_PS 0x00c0
+struct ark_ddm_cpld_ps_t {
+ volatile uint32_t cpld_ps_min;
+ volatile uint32_t cpld_ps_max;
+ volatile uint32_t cpld_full_ps_min;
+ volatile uint32_t cpld_full_ps_max;
+ volatile uint32_t cpld_dw_ps_min;
+ volatile uint32_t cpld_dw_ps_max;
+};
+
+#define ARK_DDM_SETUP 0x00e0
+struct ark_ddm_setup_t {
+ phys_addr_t cons_write_index_addr;
+ uint32_t write_index_interval; /* 4ns each */
+ volatile uint32_t cons_index;
+};
+
+#define ARK_DDM_EXPECTED_SIZE 256
+#define ARK_DDM_QOFFSET ARK_DDM_EXPECTED_SIZE
+/* Consolidated structure */
+struct ark_ddm_t {
+ struct ark_ddm_cfg_t cfg;
+ uint8_t reserved0[(ARK_DDM_STATS - ARK_DDM_CFG) -
+ sizeof(struct ark_ddm_cfg_t)];
+ struct ark_ddm_stats_t stats;
+ uint8_t reserved1[(ARK_DDM_MRDQ - ARK_DDM_STATS) -
+ sizeof(struct ark_ddm_stats_t)];
+ struct ark_ddm_mrdq_t mrdq;
+ uint8_t reserved2[(ARK_DDM_CPLDQ - ARK_DDM_MRDQ) -
+ sizeof(struct ark_ddm_mrdq_t)];
+ struct ark_ddm_cpldq_t cpldq;
+ uint8_t reserved3[(ARK_DDM_MRD_PS - ARK_DDM_CPLDQ) -
+ sizeof(struct ark_ddm_cpldq_t)];
+ struct ark_ddm_mrd_ps_t mrd_ps;
+ struct ark_ddm_qstats_t queue_stats;
+ struct ark_ddm_cpld_ps_t cpld_ps;
+ uint8_t reserved5[(ARK_DDM_SETUP - ARK_DDM_CPLD_PS) -
+ sizeof(struct ark_ddm_cpld_ps_t)];
+ struct ark_ddm_setup_t setup;
+ uint8_t reserved_p[(ARK_DDM_EXPECTED_SIZE - ARK_DDM_SETUP) -
+ sizeof(struct ark_ddm_setup_t)];
+};
+
+
+/* DDM function prototype */
+int ark_ddm_verify(struct ark_ddm_t *ddm);
+void ark_ddm_start(struct ark_ddm_t *ddm);
+int ark_ddm_stop(struct ark_ddm_t *ddm, const int wait);
+void ark_ddm_reset(struct ark_ddm_t *ddm);
+void ark_ddm_stats_reset(struct ark_ddm_t *ddm);
+void ark_ddm_setup(struct ark_ddm_t *ddm, phys_addr_t cons_addr,
+ uint32_t interval);
+void ark_ddm_dump_stats(struct ark_ddm_t *ddm, const char *msg);
+void ark_ddm_dump(struct ark_ddm_t *ddm, const char *msg);
+int ark_ddm_is_stopped(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_byte_count(struct ark_ddm_t *ddm);
+uint64_t ark_ddm_queue_pkt_count(struct ark_ddm_t *ddm);
+void ark_ddm_queue_reset_stats(struct ark_ddm_t *ddm);
+
+#endif
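
ark_ddm.h maps the DDM hardware registers by overlaying struct ark_ddm_t directly onto BAR memory, computing the reserved[] padding arrays from the register offsets and checking the total size at runtime in ark_ddm_verify(). A hedged sketch of the same overlay technique with a simplified, hypothetical register block and a compile-time check instead:

/* Illustrative sketch: a register overlay with offset-derived padding and
 * a compile-time size check; the driver above performs the equivalent
 * check at runtime in ark_ddm_verify(). */
#include <stdint.h>

#define REGS_CFG		0x0000
#define REGS_STATS		0x0020
#define REGS_EXPECTED_SIZE	0x0040

struct regs_cfg {
	uint32_t id;
	volatile uint32_t command;
	volatile uint32_t status;
};

struct regs_stats {
	volatile uint64_t byte_count;
	volatile uint64_t pkt_count;
};

struct regs_overlay {
	struct regs_cfg cfg;
	uint8_t pad0[(REGS_STATS - REGS_CFG) - sizeof(struct regs_cfg)];
	struct regs_stats stats;
	uint8_t pad1[(REGS_EXPECTED_SIZE - REGS_STATS) -
		     sizeof(struct regs_stats)];
};

_Static_assert(sizeof(struct regs_overlay) == REGS_EXPECTED_SIZE,
	       "register overlay must match the hardware layout");
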
diff --git a/drivers/net/ark/ark_ethdev.c b/drivers/net/ark/ark_ethdev.c
new file mode 100644
index 00000000..995c93d3
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev.c
@@ -0,0 +1,1001 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/stat.h>
+#include <dlfcn.h>
+
+#include <rte_ethdev_pci.h>
+#include <rte_kvargs.h>
+
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_ethdev.h"
+#include "ark_ethdev_tx.h"
+#include "ark_ethdev_rx.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_udm.h"
+#include "ark_rqp.h"
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+/* Internal prototypes */
+static int eth_ark_check_args(struct ark_adapter *ark, const char *params);
+static int eth_ark_dev_init(struct rte_eth_dev *dev);
+static int ark_config_device(struct rte_eth_dev *dev);
+static int eth_ark_dev_uninit(struct rte_eth_dev *eth_dev);
+static int eth_ark_dev_configure(struct rte_eth_dev *dev);
+static int eth_ark_dev_start(struct rte_eth_dev *dev);
+static void eth_ark_dev_stop(struct rte_eth_dev *dev);
+static void eth_ark_dev_close(struct rte_eth_dev *dev);
+static void eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static int eth_ark_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int eth_ark_dev_set_link_up(struct rte_eth_dev *dev);
+static int eth_ark_dev_set_link_down(struct rte_eth_dev *dev);
+static void eth_ark_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void eth_ark_dev_stats_reset(struct rte_eth_dev *dev);
+static void eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
+static int eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
+static void eth_ark_macaddr_remove(struct rte_eth_dev *dev,
+ uint32_t index);
+
+/*
+ * The packet generator is a functional block used to generate packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTGEN_ARG "Pkt_gen"
+
+/*
+ * The packet checker is a functional block used to verify packet
+ * patterns for testing. It is not intended for nominal use.
+ */
+#define ARK_PKTCHKR_ARG "Pkt_chkr"
+
+/*
+ * The packet director is used to select the internal ingress and
+ * egress packets paths during testing. It is not intended for
+ * nominal use.
+ */
+#define ARK_PKTDIR_ARG "Pkt_dir"
+
+/* Devinfo configurations */
+#define ARK_RX_MAX_QUEUE (4096 * 4)
+#define ARK_RX_MIN_QUEUE (512)
+#define ARK_RX_MAX_PKT_LEN ((16 * 1024) - 128)
+#define ARK_RX_MIN_BUFSIZE (1024)
+
+#define ARK_TX_MAX_QUEUE (4096 * 4)
+#define ARK_TX_MIN_QUEUE (256)
+
+static const char * const valid_arguments[] = {
+ ARK_PKTGEN_ARG,
+ ARK_PKTCHKR_ARG,
+ ARK_PKTDIR_ARG,
+ NULL
+};
+
+static const struct rte_pci_id pci_id_ark_map[] = {
+ {RTE_PCI_DEVICE(0x1d6c, 0x100d)},
+ {RTE_PCI_DEVICE(0x1d6c, 0x100e)},
+ {.vendor_id = 0, /* sentinel */ },
+};
+
+static int
+eth_ark_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct ark_adapter));
+
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = eth_ark_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+eth_ark_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ark_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ark_pmd = {
+ .id_table = pci_id_ark_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ark_pci_probe,
+ .remove = eth_ark_pci_remove,
+};
+
+static const struct eth_dev_ops ark_eth_dev_ops = {
+ .dev_configure = eth_ark_dev_configure,
+ .dev_start = eth_ark_dev_start,
+ .dev_stop = eth_ark_dev_stop,
+ .dev_close = eth_ark_dev_close,
+
+ .dev_infos_get = eth_ark_dev_info_get,
+
+ .rx_queue_setup = eth_ark_dev_rx_queue_setup,
+ .rx_queue_count = eth_ark_dev_rx_queue_count,
+ .tx_queue_setup = eth_ark_tx_queue_setup,
+
+ .link_update = eth_ark_dev_link_update,
+ .dev_set_link_up = eth_ark_dev_set_link_up,
+ .dev_set_link_down = eth_ark_dev_set_link_down,
+
+ .rx_queue_start = eth_ark_rx_start_queue,
+ .rx_queue_stop = eth_ark_rx_stop_queue,
+
+ .tx_queue_start = eth_ark_tx_queue_start,
+ .tx_queue_stop = eth_ark_tx_queue_stop,
+
+ .stats_get = eth_ark_dev_stats_get,
+ .stats_reset = eth_ark_dev_stats_reset,
+
+ .mac_addr_add = eth_ark_macaddr_add,
+ .mac_addr_remove = eth_ark_macaddr_remove,
+ .mac_addr_set = eth_ark_set_default_mac_addr,
+};
+
+static int
+check_for_ext(struct ark_adapter *ark)
+{
+ int found = 0;
+
+ /* Get the env */
+ const char *dllpath = getenv("ARK_EXT_PATH");
+
+ if (dllpath == NULL) {
+ PMD_DEBUG_LOG(DEBUG, "ARK EXT NO dll path specified\n");
+ return 0;
+ }
+ PMD_DRV_LOG(INFO, "ARK EXT found dll path at %s\n", dllpath);
+
+ /* Open and load the .so */
+ ark->d_handle = dlopen(dllpath, RTLD_LOCAL | RTLD_LAZY);
+ if (ark->d_handle == NULL) {
+ PMD_DRV_LOG(ERR, "Could not load user extension %s\n",
+ dllpath);
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, "SUCCESS: loaded user extension %s\n",
+ dllpath);
+
+ /* Get the entry points */
+ ark->user_ext.dev_init =
+ (void *(*)(struct rte_eth_dev *, void *, int))
+ dlsym(ark->d_handle, "dev_init");
+ PMD_DEBUG_LOG(DEBUG, "device ext init pointer = %p\n",
+ ark->user_ext.dev_init);
+ ark->user_ext.dev_get_port_count =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_get_port_count");
+ ark->user_ext.dev_uninit =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_uninit");
+ ark->user_ext.dev_configure =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_configure");
+ ark->user_ext.dev_start =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_start");
+ ark->user_ext.dev_stop =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_stop");
+ ark->user_ext.dev_close =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_close");
+ ark->user_ext.link_update =
+ (int (*)(struct rte_eth_dev *, int, void *))
+ dlsym(ark->d_handle, "link_update");
+ ark->user_ext.dev_set_link_up =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_up");
+ ark->user_ext.dev_set_link_down =
+ (int (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "dev_set_link_down");
+ ark->user_ext.stats_get =
+ (void (*)(struct rte_eth_dev *, struct rte_eth_stats *,
+ void *))
+ dlsym(ark->d_handle, "stats_get");
+ ark->user_ext.stats_reset =
+ (void (*)(struct rte_eth_dev *, void *))
+ dlsym(ark->d_handle, "stats_reset");
+ ark->user_ext.mac_addr_add =
+ (void (*)(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+ uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_add");
+ ark->user_ext.mac_addr_remove =
+ (void (*)(struct rte_eth_dev *, uint32_t, void *))
+ dlsym(ark->d_handle, "mac_addr_remove");
+ ark->user_ext.mac_addr_set =
+ (void (*)(struct rte_eth_dev *, struct ether_addr *,
+ void *))
+ dlsym(ark->d_handle, "mac_addr_set");
+
+ return found;
+}
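+
+/*
+ * Extension sketch (editor illustration, not part of the driver): the loader
+ * above resolves each entry point with dlsym(); optional symbols the shared
+ * object does not define simply resolve to NULL and the corresponding
+ * callbacks are skipped. A minimal extension therefore only needs dev_init(),
+ * e.g. in a hypothetical my_ext.c:
+ *
+ *   void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id)
+ *   {
+ *       struct my_state *s = calloc(1, sizeof(*s));
+ *       (void)dev; (void)a_bar; (void)port_id;
+ *       return s;   returned pointer becomes user_data in later callbacks
+ *   }
+ *
+ * built as a shared object and selected at run time via ARK_EXT_PATH.
+ */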
+
+static int
+eth_ark_dev_init(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct rte_pci_device *pci_dev;
+ int ret;
+ int port_count = 1;
+ int p;
+
+ ark->eth_dev = dev;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* Check to see if there is an extension that we need to load */
+ ret = check_for_ext(ark);
+ if (ret)
+ return ret;
+ pci_dev = ARK_DEV_TO_PCI(dev);
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ /* Use dummy function until setup */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ ark->bar0 = (uint8_t *)pci_dev->mem_resource[0].addr;
+ ark->a_bar = (uint8_t *)pci_dev->mem_resource[2].addr;
+
+ ark->sysctrl.v = (void *)&ark->bar0[ARK_SYSCTRL_BASE];
+ ark->mpurx.v = (void *)&ark->bar0[ARK_MPU_RX_BASE];
+ ark->udm.v = (void *)&ark->bar0[ARK_UDM_BASE];
+ ark->mputx.v = (void *)&ark->bar0[ARK_MPU_TX_BASE];
+ ark->ddm.v = (void *)&ark->bar0[ARK_DDM_BASE];
+ ark->cmac.v = (void *)&ark->bar0[ARK_CMAC_BASE];
+ ark->external.v = (void *)&ark->bar0[ARK_EXTERNAL_BASE];
+ ark->pktdir.v = (void *)&ark->bar0[ARK_PKTDIR_BASE];
+ ark->pktgen.v = (void *)&ark->bar0[ARK_PKTGEN_BASE];
+ ark->pktchkr.v = (void *)&ark->bar0[ARK_PKTCHKR_BASE];
+
+ ark->rqpacing =
+ (struct ark_rqpace_t *)(ark->bar0 + ARK_RCPACING_BASE);
+ ark->started = 0;
+
+ PMD_DEBUG_LOG(INFO, "Sys Ctrl Const = 0x%x HW Commit_ID: %08x\n",
+ ark->sysctrl.t32[4],
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+ PMD_DRV_LOG(INFO, "Arkville HW Commit_ID: %08x\n",
+ rte_be_to_cpu_32(ark->sysctrl.t32[0x20 / 4]));
+
+ /* If HW sanity test fails, return an error */
+ if (ark->sysctrl.t32[4] != 0xcafef00d) {
+ PMD_DRV_LOG(ERR,
+ "HW Sanity test has failed, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d,
+ ark->sysctrl.t32[4], __func__);
+ return -1;
+ }
+ if (ark->sysctrl.t32[3] != 0) {
+ if (ark_rqp_lasped(ark->rqpacing)) {
+ PMD_DRV_LOG(ERR, "Arkville Evaluation System - "
+ "Timer has Expired\n");
+ return -1;
+ }
+ PMD_DRV_LOG(WARNING, "Arkville Evaluation System - "
+ "Timer is Running\n");
+ }
+
+ PMD_DRV_LOG(INFO,
+ "HW Sanity test has PASSED, expected constant"
+ " 0x%x, read 0x%x (%s)\n",
+ 0xcafef00d, ark->sysctrl.t32[4], __func__);
+
+ /* We are a single function multi-port device. */
+ ret = ark_config_device(dev);
+ dev->dev_ops = &ark_eth_dev_ops;
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ dev->data->mac_addrs = rte_zmalloc("ark", ETHER_ADDR_LEN, 0);
+ if (!dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for storing the MAC address"
+ );
+ }
+
+ if (ark->user_ext.dev_init) {
+ ark->user_data = ark->user_ext.dev_init(dev, ark->a_bar, 0);
+ if (!ark->user_data) {
+ PMD_DRV_LOG(INFO,
+ "Failed to initialize PMD extension!"
+ " continuing without it\n");
+ memset(&ark->user_ext, 0, sizeof(struct ark_user_ext));
+ dlclose(ark->d_handle);
+ }
+ }
+
+ if (pci_dev->device.devargs)
+ ret = eth_ark_check_args(ark, pci_dev->device.devargs->args);
+ else
+ PMD_DRV_LOG(INFO, "No Device args found\n");
+
+ if (ret)
+ goto error;
+ /*
+ * We will create additional devices based on the number of requested
+ * ports
+ */
+ if (ark->user_ext.dev_get_port_count)
+ port_count =
+ ark->user_ext.dev_get_port_count(dev, ark->user_data);
+ ark->num_ports = port_count;
+
+ for (p = 0; p < port_count; p++) {
+ struct rte_eth_dev *eth_dev;
+ char name[RTE_ETH_NAME_MAX_LEN];
+
+ snprintf(name, sizeof(name), "arketh%d",
+ dev->data->port_id + p);
+
+ if (p == 0) {
+ /* First port is already allocated by DPDK */
+ eth_dev = ark->eth_dev;
+ continue;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_dev_allocate(name);
+ if (!eth_dev) {
+ PMD_DRV_LOG(ERR,
+ "Could not allocate eth_dev for port %d\n",
+ p);
+ goto error;
+ }
+
+ eth_dev->device = &pci_dev->device;
+ eth_dev->data->dev_private = ark;
+ eth_dev->dev_ops = ark->eth_dev->dev_ops;
+ eth_dev->tx_pkt_burst = ark->eth_dev->tx_pkt_burst;
+ eth_dev->rx_pkt_burst = ark->eth_dev->rx_pkt_burst;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->data->mac_addrs = rte_zmalloc(name, ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_DRV_LOG(ERR,
+ "Memory allocation for MAC failed!"
+ " Exiting.\n");
+ goto error;
+ }
+
+ if (ark->user_ext.dev_init)
+ ark->user_data =
+ ark->user_ext.dev_init(dev, ark->a_bar, p);
+ }
+
+ return ret;
+
+ error:
+ if (dev->data->mac_addrs)
+ rte_free(dev->data->mac_addrs);
+ return -1;
+}
+
+/*
+ * Initial device configuration when the device is opened:
+ * set up the DDM and UDM.
+ * Called once per PCIe device.
+ */
+static int
+ark_config_device(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ uint16_t num_q, i;
+ struct ark_mpu_t *mpu;
+
+ /*
+ * Make sure that the packet director, generator and checker are in a
+ * known state
+ */
+ ark->start_pg = 0;
+ ark->pg = ark_pktgen_init(ark->pktgen.v, 0, 1);
+ ark_pktgen_reset(ark->pg);
+ ark->pc = ark_pktchkr_init(ark->pktchkr.v, 0, 1);
+ ark_pktchkr_stop(ark->pc);
+ ark->pd = ark_pktdir_init(ark->pktdir.v);
+
+ /* Verify HW */
+ if (ark_udm_verify(ark->udm.v))
+ return -1;
+ if (ark_ddm_verify(ark->ddm.v))
+ return -1;
+
+ /* UDM */
+ if (ark_udm_reset(ark->udm.v)) {
+ PMD_DRV_LOG(ERR, "Unable to stop and reset UDM\n");
+ return -1;
+ }
+ /* Keep in reset until the MPUs are cleared */
+
+ /* MPU reset */
+ mpu = ark->mpurx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->rx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_udm_stop(ark->udm.v, 0);
+ ark_udm_configure(ark->udm.v,
+ RTE_PKTMBUF_HEADROOM,
+ RTE_MBUF_DEFAULT_DATAROOM,
+ ARK_RX_WRITE_TIME_NS);
+ ark_udm_stats_reset(ark->udm.v);
+ ark_udm_stop(ark->udm.v, 0);
+
+ /* TX -- DDM */
+ if (ark_ddm_stop(ark->ddm.v, 1))
+ PMD_DRV_LOG(ERR, "Unable to stop DDM\n");
+
+ mpu = ark->mputx.v;
+ num_q = ark_api_num_queues(mpu);
+ ark->tx_queues = num_q;
+ for (i = 0; i < num_q; i++) {
+ ark_mpu_reset(mpu);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+
+ ark_ddm_reset(ark->ddm.v);
+ ark_ddm_stats_reset(ark->ddm.v);
+
+ ark_ddm_stop(ark->ddm.v, 0);
+ ark_rqp_stats_reset(ark->rqpacing);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (ark->user_ext.dev_uninit)
+ ark->user_ext.dev_uninit(dev, ark->user_data);
+
+ ark_pktgen_uninit(ark->pg);
+ ark_pktchkr_uninit(ark->pc);
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ if (dev->data->mac_addrs)
+ rte_free(dev->data->mac_addrs);
+ if (dev->data)
+ rte_free(dev->data);
+
+ return 0;
+}
+
+static int
+eth_ark_dev_configure(struct rte_eth_dev *dev)
+{
+ PMD_FUNC_LOG(DEBUG, "\n");
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ eth_ark_dev_set_link_up(dev);
+ if (ark->user_ext.dev_configure)
+ return ark->user_ext.dev_configure(dev, ark->user_data);
+ return 0;
+}
+
+static void *
+delay_pg_start(void *arg)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)arg;
+
+ /* This function is used exclusively for regression testing. We
+ * perform a blind sleep here to ensure that the external test
+ * application has time to set up the test before we generate packets.
+ */
+ usleep(100000);
+ ark_pktgen_run(ark->pg);
+ return NULL;
+}
+
+static int
+eth_ark_dev_start(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ int i;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ /* RX Side */
+ /* start UDM */
+ ark_udm_start(ark->udm.v);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_start_queue(dev, i);
+
+ /* TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_ark_tx_queue_start(dev, i);
+
+ /* start DDM */
+ ark_ddm_start(ark->ddm.v);
+
+ ark->started = 1;
+ /* set xmit and receive function */
+ dev->rx_pkt_burst = &eth_ark_recv_pkts;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts;
+
+ if (ark->start_pg)
+ ark_pktchkr_run(ark->pc);
+
+ if (ark->start_pg && (dev->data->port_id == 0)) {
+ pthread_t thread;
+
+ /* Delay the packet generator start to allow the hardware to be ready.
+ * This is only used for sanity checking with the internal generator.
+ */
+ pthread_create(&thread, NULL, delay_pg_start, ark);
+ }
+
+ if (ark->user_ext.dev_start)
+ ark->user_ext.dev_start(dev, ark->user_data);
+
+ return 0;
+}
+
+static void
+eth_ark_dev_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ int status;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct ark_mpu_t *mpu;
+
+ PMD_FUNC_LOG(DEBUG, "\n");
+
+ if (ark->started == 0)
+ return;
+ ark->started = 0;
+
+ /* Stop the extension first */
+ if (ark->user_ext.dev_stop)
+ ark->user_ext.dev_stop(dev, ark->user_data);
+
+ /* Stop the packet generator */
+ if (ark->start_pg)
+ ark_pktgen_pause(ark->pg);
+
+ dev->rx_pkt_burst = &eth_ark_recv_pkts_noop;
+ dev->tx_pkt_burst = &eth_ark_xmit_pkts_noop;
+
+ /* STOP TX Side */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ status = eth_ark_tx_queue_stop(dev, i);
+ if (status != 0) {
+ uint8_t port = dev->data->port_id;
+ PMD_DRV_LOG(ERR,
+ "tx_queue stop anomaly"
+ " port %u, queue %u\n",
+ port, i);
+ }
+ }
+
+ /* Stop DDM */
+ /* Wait up to 0.1 second; each stop attempt takes up to 1000 * 10 microseconds */
+ for (i = 0; i < 10; i++) {
+ status = ark_ddm_stop(ark->ddm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "DDM stop anomaly. status:"
+ " %d iter: %u. (%s)\n",
+ status,
+ i,
+ __func__);
+ ark_ddm_dump(ark->ddm.v, "Stop anomaly");
+
+ mpu = ark->mputx.v;
+ for (i = 0; i < ark->tx_queues; i++) {
+ ark_mpu_dump(mpu, "DDM failure dump", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ /* STOP RX Side */
+ /* Stop UDM multiple tries attempted */
+ for (i = 0; i < 10; i++) {
+ status = ark_udm_stop(ark->udm.v, 1);
+ if (status == 0)
+ break;
+ }
+ if (status || i != 0) {
+ PMD_DRV_LOG(ERR, "UDM stop anomaly. status %d iter: %u. (%s)\n",
+ status, i, __func__);
+ ark_udm_dump(ark->udm.v, "Stop anomaly");
+
+ mpu = ark->mpurx.v;
+ for (i = 0; i < ark->rx_queues; i++) {
+ ark_mpu_dump(mpu, "UDM Stop anomaly", i);
+ mpu = RTE_PTR_ADD(mpu, ARK_MPU_QOFFSET);
+ }
+ }
+
+ ark_udm_dump_stats(ark->udm.v, "Post stop");
+ ark_udm_dump_perf(ark->udm.v, "Post stop");
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_ark_rx_dump_queue(dev, i, __func__);
+
+ /* Stop the packet checker if it is running */
+ if (ark->start_pg) {
+ ark_pktchkr_dump_stats(ark->pc);
+ ark_pktchkr_stop(ark->pc);
+ }
+}
+
+static void
+eth_ark_dev_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ uint16_t i;
+
+ if (ark->user_ext.dev_close)
+ ark->user_ext.dev_close(dev, ark->user_data);
+
+ eth_ark_dev_stop(dev);
+ eth_ark_udm_force_close(dev);
+
+ /*
+ * TODO This should only be called once for the device during shutdown
+ */
+ ark_rqp_dump(ark->rqpacing);
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ eth_ark_tx_queue_release(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = 0;
+ }
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = 0;
+ }
+}
+
+static void
+eth_ark_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+ struct ark_mpu_t *tx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_TX_BASE);
+ struct ark_mpu_t *rx_mpu = RTE_PTR_ADD(ark->bar0, ARK_MPU_RX_BASE);
+ uint16_t ports = ark->num_ports;
+
+ dev_info->max_rx_pktlen = ARK_RX_MAX_PKT_LEN;
+ dev_info->min_rx_bufsize = ARK_RX_MIN_BUFSIZE;
+
+ dev_info->max_rx_queues = ark_api_num_queues_per_port(rx_mpu, ports);
+ dev_info->max_tx_queues = ark_api_num_queues_per_port(tx_mpu, ports);
+
+ dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_RX_MAX_QUEUE,
+ .nb_min = ARK_RX_MIN_QUEUE,
+ .nb_align = ARK_RX_MIN_QUEUE}; /* power of 2 */
+
+ dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+ .nb_max = ARK_TX_MAX_QUEUE,
+ .nb_min = ARK_TX_MIN_QUEUE,
+ .nb_align = ARK_TX_MIN_QUEUE}; /* power of 2 */
+
+ /* The ARK PMD supports all line rates; how do we indicate that here? */
+ dev_info->speed_capa = (ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G |
+ ETH_LINK_SPEED_100G);
+ dev_info->pci_dev = ARK_DEV_TO_PCI(dev);
+}
+
+static int
+eth_ark_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ PMD_DEBUG_LOG(DEBUG, "link status = %d\n",
+ dev->data->dev_link.link_status);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.link_update) {
+ return ark->user_ext.link_update
+ (dev, wait_to_complete,
+ ark->user_data);
+ }
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 1;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_up)
+ return ark->user_ext.dev_set_link_up(dev, ark->user_data);
+ return 0;
+}
+
+static int
+eth_ark_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ dev->data->dev_link.link_status = 0;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.dev_set_link_down)
+ return ark->user_ext.dev_set_link_down(dev, ark->user_data);
+ return 0;
+}
+
+static void
+eth_ark_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ uint16_t i;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ stats->ipackets = 0;
+ stats->ibytes = 0;
+ stats->opackets = 0;
+ stats->obytes = 0;
+ stats->imissed = 0;
+ stats->oerrors = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_get(dev->data->tx_queues[i], stats);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_get(dev->data->rx_queues[i], stats);
+ if (ark->user_ext.stats_get)
+ ark->user_ext.stats_get(dev, stats, ark->user_data);
+}
+
+static void
+eth_ark_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ eth_tx_queue_stats_reset(dev->data->tx_queues[i]);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ eth_rx_queue_stats_reset(dev->data->rx_queues[i]);
+ if (ark->user_ext.stats_reset)
+ ark->user_ext.stats_reset(dev, ark->user_data);
+}
+
+static int
+eth_ark_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_add) {
+ ark->user_ext.mac_addr_add(dev,
+ mac_addr,
+ index,
+ pool,
+ ark->user_data);
+ return 0;
+ }
+ return -ENOTSUP;
+}
+
+static void
+eth_ark_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_remove)
+ ark->user_ext.mac_addr_remove(dev, index, ark->user_data);
+}
+
+static void
+eth_ark_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct ark_adapter *ark =
+ (struct ark_adapter *)dev->data->dev_private;
+
+ if (ark->user_ext.mac_addr_set)
+ ark->user_ext.mac_addr_set(dev, mac_addr, ark->user_data);
+}
+
+static inline int
+process_pktdir_arg(const char *key, const char *value,
+ void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ struct ark_adapter *ark =
+ (struct ark_adapter *)extra_args;
+
+ ark->pkt_dir_v = strtol(value, NULL, 16);
+ PMD_FUNC_LOG(DEBUG, "pkt_dir_v = 0x%x\n", ark->pkt_dir_v);
+ return 0;
+}
+
+static inline int
+process_file_args(const char *key, const char *value, void *extra_args)
+{
+ PMD_FUNC_LOG(DEBUG, "key = %s, value = %s\n",
+ key, value);
+ char *args = (char *)extra_args;
+
+ /* Open the configuration file */
+ FILE *file = fopen(value, "r");
+ char line[ARK_MAX_ARG_LEN];
+ int size = 0;
+ int first = 1;
+
+ if (file == NULL) {
+ PMD_DRV_LOG(ERR, "Unable to open config file %s\n", value);
+ return -1;
+ }
+
+ while (fgets(line, sizeof(line), file)) {
+ size += strlen(line);
+ if (size >= ARK_MAX_ARG_LEN) {
+ PMD_DRV_LOG(ERR, "Unable to parse file %s args, "
+ "parameter list is too long\n", value);
+ fclose(file);
+ return -1;
+ }
+ if (first) {
+ strncpy(args, line, ARK_MAX_ARG_LEN);
+ first = 0;
+ } else {
+ strncat(args, line, ARK_MAX_ARG_LEN);
+ }
+ }
+ PMD_FUNC_LOG(DEBUG, "file = %s\n", args);
+ fclose(file);
+ return 0;
+}
+
+static int
+eth_ark_check_args(struct ark_adapter *ark, const char *params)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int k_idx;
+ struct rte_kvargs_pair *pair = NULL;
+ int ret = -1;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return 0;
+
+ ark->pkt_gen_args[0] = 0;
+ ark->pkt_chkr_args[0] = 0;
+
+ for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
+ pair = &kvlist->pairs[k_idx];
+ PMD_FUNC_LOG(DEBUG, "**** Arg passed to PMD = %s:%s\n",
+ pair->key,
+ pair->value);
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTDIR_ARG,
+ &process_pktdir_arg,
+ ark) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTDIR_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTGEN_ARG,
+ &process_file_args,
+ ark->pkt_gen_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTGEN_ARG);
+ goto free_kvlist;
+ }
+
+ if (rte_kvargs_process(kvlist,
+ ARK_PKTCHKR_ARG,
+ &process_file_args,
+ ark->pkt_chkr_args) != 0) {
+ PMD_DRV_LOG(ERR, "Unable to parse arg %s\n", ARK_PKTCHKR_ARG);
+ goto free_kvlist;
+ }
+
+ PMD_DRV_LOG(INFO, "packet director set to 0x%x\n", ark->pkt_dir_v);
+ /* Setup the packet director */
+ ark_pktdir_setup(ark->pd, ark->pkt_dir_v);
+
+ /* Setup the packet generator */
+ if (ark->pkt_gen_args[0]) {
+ PMD_DRV_LOG(INFO, "Setting up the packet generator\n");
+ ark_pktgen_parse(ark->pkt_gen_args);
+ ark_pktgen_reset(ark->pg);
+ ark_pktgen_setup(ark->pg);
+ ark->start_pg = 1;
+ }
+
+ /* Setup the packet checker */
+ if (ark->pkt_chkr_args[0]) {
+ ark_pktchkr_parse(ark->pkt_chkr_args);
+ ark_pktchkr_setup(ark->pc);
+ }
+
+ ret = 0;
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+RTE_PMD_REGISTER_PCI(net_ark, rte_ark_pmd);
+RTE_PMD_REGISTER_KMOD_DEP(net_ark, "* igb_uio | uio_pci_generic ");
+RTE_PMD_REGISTER_PCI_TABLE(net_ark, pci_id_ark_map);
+RTE_PMD_REGISTER_PARAM_STRING(net_ark,
+ ARK_PKTGEN_ARG "=<filename> "
+ ARK_PKTCHKR_ARG "=<filename> "
+ ARK_PKTDIR_ARG "=<bitmap>");
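+
+/*
+ * Usage sketch (illustrative only; the key strings are the values of the
+ * ARK_PKT*_ARG macros defined earlier in this file, assumed here to be
+ * Pkt_dir, Pkt_gen and Pkt_chkr). The device arguments registered above are
+ * passed as PCI devargs on the EAL command line, e.g.
+ *
+ *   testpmd -w 0000:01:00.0,Pkt_dir=0x3,Pkt_gen=pg.conf,Pkt_chkr=pc.conf -- -i
+ *
+ * Pkt_dir is parsed as a hex bitmap by process_pktdir_arg(); Pkt_gen and
+ * Pkt_chkr name configuration files read by process_file_args().
+ */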
diff --git a/drivers/net/ark/ark_ethdev.h b/drivers/net/ark/ark_ethdev.h
new file mode 100644
index 00000000..9f8d32fc
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev.h
@@ -0,0 +1,41 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_H_
+#define _ARK_ETHDEV_H_
+
+#define ARK_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+
+#endif
diff --git a/drivers/net/ark/ark_ethdev_rx.c b/drivers/net/ark/ark_ethdev_rx.c
new file mode 100644
index 00000000..f39e6f68
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_rx.c
@@ -0,0 +1,673 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_rx.h"
+#include "ark_global.h"
+#include "ark_logs.h"
+#include "ark_ethdev.h"
+#include "ark_mpu.h"
+#include "ark_udm.h"
+
+#define ARK_RX_META_SIZE 32
+#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
+#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
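+
+/*
+ * Layout sketch (inferred from the offsets above): the FPGA writes a
+ * 32-byte ark_rx_meta record into the last ARK_RX_META_SIZE bytes of the
+ * mbuf headroom, immediately ahead of the packet data:
+ *
+ *   buf_addr                               buf_addr + RTE_PKTMBUF_HEADROOM
+ *   |---- headroom - 32 ----|---- meta (32) ----|---- packet data ... ----|
+ */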
+
+/* Forward declarations */
+struct ark_rx_queue;
+struct ark_rx_meta;
+
+static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
+static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
+static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index);
+static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);
+
+/* ************************************************************************* */
+struct ark_rx_queue {
+ /* array of mbufs to populate */
+ struct rte_mbuf **reserve_q;
+ /* array of physical addresses of the mbuf data pointer */
+ /* The array itself is accessed through a virtual address */
+ phys_addr_t *paddress_q;
+ struct rte_mempool *mb_pool;
+
+ struct ark_udm_t *udm;
+ struct ark_mpu_t *mpu;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ uint32_t seed_index; /* step 1 set with empty mbuf */
+ uint32_t cons_index; /* step 3 consumed by driver */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+
+ /* The queue Index is used within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t pad1;
+
+ /* separate cache line */
+ /* second cache line - fields only used in slow path */
+ MARKER cacheline1 __rte_cache_min_aligned;
+
+ volatile uint32_t prod_index; /* step 2 filled by FPGA */
+} __rte_cache_aligned;
+
+
+/* ************************************************************************* */
+static int
+eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
+ struct ark_rx_queue *queue,
+ uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
+{
+ phys_addr_t queue_base;
+ phys_addr_t phys_addr_q_base;
+ phys_addr_t phys_addr_prod_index;
+
+ queue_base = rte_malloc_virt2phy(queue);
+ phys_addr_prod_index = queue_base +
+ offsetof(struct ark_rx_queue, prod_index);
+
+ phys_addr_q_base = rte_malloc_virt2phy(queue->paddress_q);
+
+ /* Verify HW */
+ if (ark_mpu_verify(queue->mpu, sizeof(phys_addr_t))) {
+ PMD_DRV_LOG(ERR, "Illegal configuration rx queue\n");
+ return -1;
+ }
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);
+
+ ark_udm_write_addr(queue->udm, phys_addr_prod_index);
+
+ /* advance the valid pointer, but don't start until the queue starts */
+ ark_mpu_reset_stats(queue->mpu);
+
+ /* The seed is the producer index for the HW */
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+static inline void
+eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
+{
+ queue->cons_index = cons_index;
+ eth_ark_rx_seed_mbufs(queue);
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ static int warning1; /* = 0 */
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+
+ struct ark_rx_queue *queue;
+ uint32_t i;
+ int status;
+
+ /* Future work: divide the queues evenly across multiple ports */
+ int port = dev->data->port_id;
+ int qidx = port + queue_idx;
+
+ /* We may already be setup, free memory prior to re-allocation */
+ if (dev->data->rx_queues[queue_idx] != NULL) {
+ eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
+ dev->data->rx_queues[queue_idx] = NULL;
+ }
+
+ if (rx_conf != NULL && warning1 == 0) {
+ warning1 = 1;
+ PMD_DRV_LOG(INFO,
+ "Arkville ignores rte_eth_rxconf argument.\n");
+ }
+
+ if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "Error: DPDK Arkville requires head room > %d bytes (%s)\n",
+ ARK_RX_META_SIZE, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1; /* ERROR CODE */
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_rxqueue",
+ sizeof(struct ark_rx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* NOTE zmalloc is used, no need to 0 indexes, etc. */
+ queue->mb_pool = mb_pool;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+
+ queue->reserve_q =
+ rte_zmalloc_socket("Ark_rx_queue mbuf",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+ queue->paddress_q =
+ rte_zmalloc_socket("Ark_rx_queue paddr",
+ nb_desc * sizeof(phys_addr_t),
+ 64,
+ socket_id);
+
+ if (queue->reserve_q == 0 || queue->paddress_q == 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate queue memory in %s\n",
+ __func__);
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ dev->data->rx_queues[queue_idx] = queue;
+ queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);
+
+ /* populate mbuf reserve */
+ status = eth_ark_rx_seed_mbufs(queue);
+
+ /* MPU Setup */
+ if (status == 0)
+ status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);
+
+ if (unlikely(status != 0)) {
+ PMD_DRV_LOG(ERR, "Failed to initialize RX queue %d %s\n",
+ qidx,
+ __func__);
+ /* Free any mbufs already seeded into the reserve ring */
+ for (i = 0; i < nb_desc; ++i)
+ rte_pktmbuf_free(queue->reserve_q[i]);
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
+ struct rte_mbuf **rx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct ark_rx_queue *queue;
+ register uint32_t cons_index, prod_index;
+ uint16_t nb;
+ struct rte_mbuf *mbuf;
+ struct ark_rx_meta *meta;
+
+ queue = (struct ark_rx_queue *)rx_queue;
+ if (unlikely(queue == 0))
+ return 0;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+ prod_index = queue->prod_index;
+ cons_index = queue->cons_index;
+ nb = 0;
+
+ while (prod_index != cons_index) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ /* prefetch mbuf */
+ rte_mbuf_prefetch_part1(mbuf);
+ rte_mbuf_prefetch_part2(mbuf);
+
+ /* META DATA embedded in headroom */
+ meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);
+
+ mbuf->port = meta->port;
+ mbuf->pkt_len = meta->pkt_len;
+ mbuf->data_len = meta->pkt_len;
+ mbuf->timestamp = meta->timestamp;
+ mbuf->udata64 = meta->user_data;
+
+ if (ARK_RX_DEBUG) { /* debug sanity checks */
+ if ((meta->pkt_len > (1024 * 16)) ||
+ (meta->pkt_len == 0)) {
+ PMD_RX_LOG(DEBUG, "RX: Bad Meta Q: %u"
+ " cons: %" PRIU32
+ " prod: %" PRIU32
+ " seed_index %" PRIU32
+ "\n",
+ queue->phys_qid,
+ cons_index,
+ queue->prod_index,
+ queue->seed_index);
+
+
+ PMD_RX_LOG(DEBUG, " : UDM"
+ " prod: %" PRIU32
+ " len: %u\n",
+ queue->udm->rt_cfg.prod_idx,
+ meta->pkt_len);
+ ark_mpu_dump(queue->mpu,
+ " ",
+ queue->phys_qid);
+ dump_mbuf_data(mbuf, 0, 256);
+ /* the metadata is FUBAR, so clamp to a safe length */
+ mbuf->pkt_len = 63;
+ meta->pkt_len = 63;
+ }
+ /* seqn is only set under debug */
+ mbuf->seqn = cons_index;
+ }
+
+ if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
+ cons_index = eth_ark_rx_jumbo
+ (queue, meta, mbuf, cons_index + 1);
+ else
+ cons_index += 1;
+
+ rx_pkts[nb] = mbuf;
+ nb++;
+ if (nb >= nb_pkts)
+ break;
+ }
+
+ if (unlikely(nb != 0))
+ /* report next free to FPGA */
+ eth_ark_rx_update_cons_index(queue, cons_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_rx_jumbo(struct ark_rx_queue *queue,
+ struct ark_rx_meta *meta,
+ struct rte_mbuf *mbuf0,
+ uint32_t cons_index)
+{
+ struct rte_mbuf *mbuf_prev;
+ struct rte_mbuf *mbuf;
+
+ uint16_t remaining;
+ uint16_t data_len;
+ uint8_t segments;
+
+ /* first buf populated by the caller */
+ mbuf_prev = mbuf0;
+ segments = 1;
+ data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
+ remaining = meta->pkt_len - data_len;
+ mbuf0->data_len = data_len;
+
+ /* HW guarantees that the data does not exceed prod_index! */
+ while (remaining != 0) {
+ data_len = RTE_MIN(remaining,
+ RTE_MBUF_DEFAULT_DATAROOM +
+ RTE_PKTMBUF_HEADROOM);
+
+ remaining -= data_len;
+ segments += 1;
+
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ mbuf_prev->next = mbuf;
+ mbuf_prev = mbuf;
+ mbuf->data_len = data_len;
+ mbuf->data_off = 0;
+ if (ARK_RX_DEBUG)
+ mbuf->seqn = cons_index; /* for debug only */
+
+ cons_index += 1;
+ }
+
+ mbuf0->nb_segs = segments;
+ return cons_index;
+}
+
+/* Drain the internal queue allowing hw to clear out. */
+static void
+eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
+{
+ register uint32_t cons_index;
+ struct rte_mbuf *mbuf;
+
+ cons_index = queue->cons_index;
+
+ /* NOT performance optimized, since this is a one-shot call */
+ while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
+ mbuf = queue->reserve_q[cons_index & queue->queue_mask];
+ rte_pktmbuf_free(mbuf);
+ cons_index++;
+ eth_ark_rx_update_cons_index(queue, cons_index);
+ }
+}
+
+uint32_t
+eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ return (queue->prod_index - queue->cons_index); /* mod arith */
+}
+
+/* ************************************************************************* */
+int
+eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ ark_mpu_set_producer(queue->mpu, queue->seed_index);
+ ark_mpu_start(queue->mpu);
+
+ ark_udm_queue_enable(queue->udm, 1);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+
+/* Queue can be restarted; the seeded data remains in place.
+ */
+int
+eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+ if (queue == 0)
+ return -1;
+
+ ark_udm_queue_enable(queue->udm, 0);
+
+ dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static inline int
+eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
+{
+ uint32_t limit = queue->cons_index + queue->queue_size;
+ uint32_t seed_index = queue->seed_index;
+
+ uint32_t count = 0;
+ uint32_t seed_m = queue->seed_index & queue->queue_mask;
+
+ uint32_t nb = limit - seed_index;
+
+ /* Handle wrap around -- remainder is filled on the next call */
+ if (unlikely(seed_m + nb > queue->queue_size))
+ nb = queue->queue_size - seed_m;
+
+ struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
+ int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);
+
+ if (unlikely(status != 0))
+ return -1;
+
+ if (ARK_RX_DEBUG) { /* DEBUG */
+ while (count != nb) {
+ struct rte_mbuf *mbuf_init =
+ queue->reserve_q[seed_m + count];
+
+ memset(mbuf_init->buf_addr, -1, 512);
+ *((uint32_t *)mbuf_init->buf_addr) =
+ seed_index + count;
+ *(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
+ queue->phys_qid;
+ count++;
+ }
+ count = 0;
+ } /* DEBUG */
+ queue->seed_index += nb;
+
+ /* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
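+ /* Sketch for readability: the unrolled switch below is equivalent to
+ * while (count != nb) {
+ * queue->paddress_q[seed_m++] = (*mbufs++)->buf_physaddr;
+ * count++;
+ * }
+ * the unrolling only reduces per-iteration loop overhead.
+ */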
+ switch (nb % 4) {
+ case 0:
+ while (count != nb) {
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+ case 3:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+ case 2:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+ case 1:
+ queue->paddress_q[seed_m++] =
+ (*mbufs++)->buf_physaddr;
+ count++;
+ /* FALLTHROUGH */
+
+ } /* while (count != nb) */
+ } /* switch */
+
+ return 0;
+}
+
+void
+eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg)
+{
+ struct ark_rx_queue *queue;
+
+ queue = dev->data->rx_queues[queue_id];
+
+ ark_ethdev_rx_dump(msg, queue);
+}
+
+/* ************************************************************************* */
+/* Called on device close; not a user API. The queue is already stopped. */
+void
+eth_ark_dev_rx_queue_release(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+ uint32_t i;
+
+ queue = (struct ark_rx_queue *)vqueue;
+ if (queue == 0)
+ return;
+
+ ark_udm_queue_enable(queue->udm, 0);
+ /* Stop the MPU since the pointers are going away */
+ ark_mpu_stop(queue->mpu);
+
+ /* Need to clear out mbufs here, dropping packets along the way */
+ eth_ark_rx_queue_drain(queue);
+
+ for (i = 0; i < queue->queue_size; ++i)
+ rte_pktmbuf_free(queue->reserve_q[i]);
+
+ rte_free(queue->reserve_q);
+ rte_free(queue->paddress_q);
+ rte_free(queue);
+}
+
+void
+eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_rx_queue *queue;
+ struct ark_udm_t *udm;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+ udm = queue->udm;
+
+ uint64_t ibytes = ark_udm_bytes(udm);
+ uint64_t ipackets = ark_udm_packets(udm);
+ uint64_t idropped = ark_udm_dropped(queue->udm);
+
+ stats->q_ipackets[queue->queue_index] = ipackets;
+ stats->q_ibytes[queue->queue_index] = ibytes;
+ stats->q_errors[queue->queue_index] = idropped;
+ stats->ipackets += ipackets;
+ stats->ibytes += ibytes;
+ stats->imissed += idropped;
+}
+
+void
+eth_rx_queue_stats_reset(void *vqueue)
+{
+ struct ark_rx_queue *queue;
+
+ queue = vqueue;
+ if (queue == 0)
+ return;
+
+ ark_mpu_reset_stats(queue->mpu);
+ ark_udm_queue_stats_reset(queue->udm);
+}
+
+void
+eth_ark_udm_force_close(struct rte_eth_dev *dev)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_rx_queue *queue;
+ uint32_t index;
+ uint16_t i;
+
+ if (!ark_udm_is_flushed(ark->udm.v)) {
+ /* restart the MPUs */
+ PMD_DRV_LOG(ERR, "ARK: %s UDM not flushed\n", __func__);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
+ if (queue == 0)
+ continue;
+
+ ark_mpu_start(queue->mpu);
+ /* Add some buffers */
+ index = 100000 + queue->seed_index;
+ ark_mpu_set_producer(queue->mpu, index);
+ }
+ /* Wait to allow data to pass */
+ usleep(100);
+
+ PMD_DEBUG_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
+ ark_udm_is_flushed(ark->udm.v));
+ }
+ ark_udm_reset(ark->udm.v);
+}
+
+static void
+ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
+{
+ if (queue == NULL)
+ return;
+ PMD_DEBUG_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
+ PMD_DEBUG_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ "queue_size", queue->queue_size,
+ "seed_index", queue->seed_index,
+ "prod_index", queue->prod_index,
+ "cons_index", queue->cons_index);
+
+ ark_mpu_dump(queue->mpu, name, queue->phys_qid);
+ ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
+ ark_udm_dump(queue->udm, name);
+ ark_udm_dump_setup(queue->udm, queue->phys_qid);
+}
+
+/* Only used in debug.
+ * This function is a raw memory dump of a portion of an mbuf's memory
+ * region. The usual function, rte_pktmbuf_dump(), only shows data
+ * relative to the data_off field. This function shows data
+ * anywhere in the mbuf's buffer, which is useful for examining
+ * data in the headroom or tailroom portion of an mbuf.
+ */
+static void
+dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
+{
+ uint16_t i, j;
+
+ PMD_DRV_LOG(INFO, " MBUF: %p len %d, off: %d, seq: %" PRIU32 "\n", mbuf,
+ mbuf->pkt_len, mbuf->data_off, mbuf->seqn);
+ for (i = lo; i < hi; i += 16) {
+ uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);
+
+ PMD_DRV_LOG(INFO, " %6d: ", i);
+ for (j = 0; j < 16; j++)
+ PMD_DRV_LOG(INFO, " %02x", dp[j]);
+
+ PMD_DRV_LOG(INFO, "\n");
+ }
+}
diff --git a/drivers/net/ark/ark_ethdev_rx.h b/drivers/net/ark/ark_ethdev_rx.h
new file mode 100644
index 00000000..3a54a4c9
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_rx.h
@@ -0,0 +1,65 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_RX_H_
+#define _ARK_ETHDEV_RX_H_
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+
+int eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp);
+uint32_t eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id);
+int eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id);
+uint16_t eth_ark_recv_pkts_noop(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void eth_ark_dev_rx_queue_release(void *rx_queue);
+void eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_rx_queue_stats_reset(void *vqueue);
+void eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
+ const char *msg);
+void eth_ark_udm_force_close(struct rte_eth_dev *dev);
+
+#endif
diff --git a/drivers/net/ark/ark_ethdev_tx.c b/drivers/net/ark/ark_ethdev_tx.c
new file mode 100644
index 00000000..9ae7ae0e
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_tx.c
@@ -0,0 +1,468 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_ethdev_tx.h"
+#include "ark_global.h"
+#include "ark_mpu.h"
+#include "ark_ddm.h"
+#include "ark_ethdev.h"
+#include "ark_logs.h"
+
+#define ARK_TX_META_SIZE 32
+#define ARK_TX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_TX_META_SIZE)
+#define ARK_TX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)
+
+
+/* ************************************************************************* */
+struct ark_tx_queue {
+ struct ark_tx_meta *meta_q;
+ struct rte_mbuf **bufs;
+
+ /* handles for hw objects */
+ struct ark_mpu_t *mpu;
+ struct ark_ddm_t *ddm;
+
+ /* Stats: the HW tracks bytes and packets; we only need to count send errors */
+ uint64_t tx_errors;
+
+ uint32_t queue_size;
+ uint32_t queue_mask;
+
+ /* 3 indexes to the paired data rings. */
+ uint32_t prod_index; /* where to put the next one */
+ uint32_t free_index; /* mbuf has been freed */
+
+ /* The queue Id is used to identify the HW Q */
+ uint16_t phys_qid;
+ /* The queue Index within the dpdk device structures */
+ uint16_t queue_index;
+
+ uint32_t pad[1];
+
+ /* second cache line - fields only used in slow path */
+ MARKER cacheline1 __rte_cache_min_aligned;
+ uint32_t cons_index; /* hw is done, can be freed */
+} __rte_cache_aligned;
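+
+/*
+ * Index protocol sketch (as used below): prod_index is advanced by the
+ * driver as descriptors are posted, cons_index is written back by the DDM
+ * hardware (see eth_ark_tx_hw_queue_config()), and free_index trails
+ * cons_index as completed mbufs are released in free_completed_tx().
+ * All three are free-running; ring positions are taken as index & queue_mask.
+ */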
+
+/* Forward declarations */
+static uint32_t eth_ark_tx_jumbo(struct ark_tx_queue *queue,
+ struct rte_mbuf *mbuf);
+static int eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue);
+static void free_completed_tx(struct ark_tx_queue *queue);
+
+static inline void
+ark_tx_hw_queue_stop(struct ark_tx_queue *queue)
+{
+ ark_mpu_stop(queue->mpu);
+}
+
+/* ************************************************************************* */
+static inline void
+eth_ark_tx_meta_from_mbuf(struct ark_tx_meta *meta,
+ const struct rte_mbuf *mbuf,
+ uint8_t flags)
+{
+ meta->physaddr = rte_mbuf_data_dma_addr(mbuf);
+ meta->delta_ns = 0;
+ meta->data_len = rte_pktmbuf_data_len(mbuf);
+ meta->flags = flags;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts_noop(void *vtxq __rte_unused,
+ struct rte_mbuf **tx_pkts __rte_unused,
+ uint16_t nb_pkts __rte_unused)
+{
+ return 0;
+}
+
+/* ************************************************************************* */
+uint16_t
+eth_ark_xmit_pkts(void *vtxq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct ark_tx_queue *queue;
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+
+ uint32_t idx;
+ uint32_t prod_index_limit;
+ int stat;
+ uint16_t nb;
+
+ queue = (struct ark_tx_queue *)vtxq;
+
+ /* free any packets after the HW is done with them */
+ free_completed_tx(queue);
+
+ prod_index_limit = queue->queue_size + queue->free_index;
+
+ for (nb = 0;
+ (nb < nb_pkts) && (queue->prod_index != prod_index_limit);
+ ++nb) {
+ mbuf = tx_pkts[nb];
+
+ if (ARK_TX_PAD_TO_60) {
+ if (unlikely(rte_pktmbuf_pkt_len(mbuf) < 60)) {
+ /* even a small packet may be segmented,
+ * so be sure the padding is appended to the last mbuf
+ */
+ uint16_t to_add =
+ 60 - rte_pktmbuf_pkt_len(mbuf);
+ char *appended =
+ rte_pktmbuf_append(mbuf, to_add);
+
+ if (appended == 0) {
+ /* This packet is in error,
+ * we cannot send it so just
+ * count it and delete it.
+ */
+ queue->tx_errors += 1;
+ rte_pktmbuf_free(mbuf);
+ continue;
+ }
+ memset(appended, 0, to_add);
+ }
+ }
+
+ if (unlikely(mbuf->nb_segs != 1)) {
+ stat = eth_ark_tx_jumbo(queue, mbuf);
+ if (unlikely(stat != 0))
+ break; /* Queue is full */
+ } else {
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+ eth_ark_tx_meta_from_mbuf(meta,
+ mbuf,
+ ARK_DDM_SOP |
+ ARK_DDM_EOP);
+ queue->prod_index++;
+ }
+ }
+
+ if (ARK_TX_DEBUG && (nb != nb_pkts)) {
+ PMD_TX_LOG(DEBUG, "TX: Failure to send:"
+ " req: %" PRIU32
+ " sent: %" PRIU32
+ " prod: %" PRIU32
+ " cons: %" PRIU32
+ " free: %" PRIU32 "\n",
+ nb_pkts, nb,
+ queue->prod_index,
+ queue->cons_index,
+ queue->free_index);
+ ark_mpu_dump(queue->mpu,
+ "TX Failure MPU: ",
+ queue->phys_qid);
+ }
+
+ /* let FPGA know producer index. */
+ if (likely(nb != 0))
+ ark_mpu_set_producer(queue->mpu, queue->prod_index);
+
+ return nb;
+}
+
+/* ************************************************************************* */
+static uint32_t
+eth_ark_tx_jumbo(struct ark_tx_queue *queue, struct rte_mbuf *mbuf)
+{
+ struct rte_mbuf *next;
+ struct ark_tx_meta *meta;
+ uint32_t free_queue_space;
+ uint32_t idx;
+ uint8_t flags = ARK_DDM_SOP;
+
+ free_queue_space = queue->queue_mask -
+ (queue->prod_index - queue->free_index);
+ if (unlikely(free_queue_space < mbuf->nb_segs))
+ return -1;
+
+ while (mbuf != NULL) {
+ next = mbuf->next;
+
+ idx = queue->prod_index & queue->queue_mask;
+ queue->bufs[idx] = mbuf;
+ meta = &queue->meta_q[idx];
+
+ flags |= (next == NULL) ? ARK_DDM_EOP : 0;
+ eth_ark_tx_meta_from_mbuf(meta, mbuf, flags);
+ queue->prod_index++;
+
+ flags &= ~ARK_DDM_SOP; /* drop SOP flags */
+ mbuf = next;
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct ark_adapter *ark = (struct ark_adapter *)dev->data->dev_private;
+ struct ark_tx_queue *queue;
+ int status;
+
+ /* Future work: divide the queues evenly across multiple ports */
+ int port = dev->data->port_id;
+ int qidx = port + queue_idx;
+
+ if (!rte_is_power_of_2(nb_desc)) {
+ PMD_DRV_LOG(ERR,
+ "DPDK Arkville configuration queue size"
+ " must be power of two %u (%s)\n",
+ nb_desc, __func__);
+ return -1;
+ }
+
+ /* Allocate queue struct */
+ queue = rte_zmalloc_socket("Ark_txqueue",
+ sizeof(struct ark_tx_queue),
+ 64,
+ socket_id);
+ if (queue == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate tx "
+ "queue memory in %s\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ /* we use zmalloc, so there is no need to zero the fields */
+ queue->queue_size = nb_desc;
+ queue->queue_mask = nb_desc - 1;
+ queue->phys_qid = qidx;
+ queue->queue_index = queue_idx;
+ dev->data->tx_queues[queue_idx] = queue;
+
+ queue->meta_q =
+ rte_zmalloc_socket("Ark_txqueue meta",
+ nb_desc * sizeof(struct ark_tx_meta),
+ 64,
+ socket_id);
+ queue->bufs =
+ rte_zmalloc_socket("Ark_txqueue bufs",
+ nb_desc * sizeof(struct rte_mbuf *),
+ 64,
+ socket_id);
+
+ if (queue->meta_q == 0 || queue->bufs == 0) {
+ PMD_DRV_LOG(ERR, "Failed to allocate "
+ "queue memory in %s\n", __func__);
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -ENOMEM;
+ }
+
+ queue->ddm = RTE_PTR_ADD(ark->ddm.v, qidx * ARK_DDM_QOFFSET);
+ queue->mpu = RTE_PTR_ADD(ark->mputx.v, qidx * ARK_MPU_QOFFSET);
+
+ status = eth_ark_tx_hw_queue_config(queue);
+
+ if (unlikely(status != 0)) {
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+ return -1; /* ERROR CODE */
+ }
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static int
+eth_ark_tx_hw_queue_config(struct ark_tx_queue *queue)
+{
+ phys_addr_t queue_base, ring_base, cons_index_addr;
+ uint32_t write_interval_ns;
+
+ /* Verify HW -- MPU */
+ if (ark_mpu_verify(queue->mpu, sizeof(struct ark_tx_meta)))
+ return -1;
+
+ queue_base = rte_malloc_virt2phy(queue);
+ ring_base = rte_malloc_virt2phy(queue->meta_q);
+ cons_index_addr =
+ queue_base + offsetof(struct ark_tx_queue, cons_index);
+
+ ark_mpu_stop(queue->mpu);
+ ark_mpu_reset(queue->mpu);
+
+ /* Stop and Reset and configure MPU */
+ ark_mpu_configure(queue->mpu, ring_base, queue->queue_size, 1);
+
+ /*
+ * Adjust the write interval based on queue size --
+ * smaller queues write back more often (more PCIe traffic).
+ * Queue sizes less than 128 are not allowed.
+ */
+ switch (queue->queue_size) {
+ case 128:
+ write_interval_ns = 500;
+ break;
+ case 256:
+ write_interval_ns = 500;
+ break;
+ case 512:
+ write_interval_ns = 1000;
+ break;
+ default:
+ write_interval_ns = 2000;
+ break;
+ }
+
+ /* Completion (consumer index) write-back address for the DDM */
+ ark_ddm_setup(queue->ddm, cons_index_addr, write_interval_ns);
+
+ return 0;
+}
+
+/* ************************************************************************* */
+void
+eth_ark_tx_queue_release(void *vtx_queue)
+{
+ struct ark_tx_queue *queue;
+
+ queue = (struct ark_tx_queue *)vtx_queue;
+
+ ark_tx_hw_queue_stop(queue);
+
+ queue->cons_index = queue->prod_index;
+ free_completed_tx(queue);
+
+ rte_free(queue->meta_q);
+ rte_free(queue->bufs);
+ rte_free(queue);
+}
+
+/* ************************************************************************* */
+int
+eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+ int cnt = 0;
+
+ queue = dev->data->tx_queues[queue_id];
+
+ /* Wait for DDM to send out all packets. */
+ while (queue->cons_index != queue->prod_index) {
+ usleep(100);
+ if (cnt++ > 10000)
+ return -1;
+ }
+
+ ark_mpu_stop(queue->mpu);
+ free_completed_tx(queue);
+
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+ return 0;
+}
+
+int
+eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct ark_tx_queue *queue;
+
+ queue = dev->data->tx_queues[queue_id];
+ if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_STARTED)
+ return 0;
+
+ ark_mpu_start(queue->mpu);
+ dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+}
+
+/* ************************************************************************* */
+static void
+free_completed_tx(struct ark_tx_queue *queue)
+{
+ struct rte_mbuf *mbuf;
+ struct ark_tx_meta *meta;
+ uint32_t top_index;
+
+ top_index = queue->cons_index; /* read once */
+ while (queue->free_index != top_index) {
+ meta = &queue->meta_q[queue->free_index & queue->queue_mask];
+ mbuf = queue->bufs[queue->free_index & queue->queue_mask];
+
+ if (likely((meta->flags & ARK_DDM_SOP) != 0)) {
+ /* ref count of the mbuf is checked in this call. */
+ rte_pktmbuf_free(mbuf);
+ }
+ queue->free_index++;
+ }
+}
+
+/* ************************************************************************* */
+void
+eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+ uint64_t bytes, pkts;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ bytes = ark_ddm_queue_byte_count(ddm);
+ pkts = ark_ddm_queue_pkt_count(ddm);
+
+ stats->q_opackets[queue->queue_index] = pkts;
+ stats->q_obytes[queue->queue_index] = bytes;
+ stats->opackets += pkts;
+ stats->obytes += bytes;
+ stats->oerrors += queue->tx_errors;
+}
+
+void
+eth_tx_queue_stats_reset(void *vqueue)
+{
+ struct ark_tx_queue *queue;
+ struct ark_ddm_t *ddm;
+
+ queue = vqueue;
+ ddm = queue->ddm;
+
+ ark_ddm_queue_reset_stats(ddm);
+ queue->tx_errors = 0;
+}
diff --git a/drivers/net/ark/ark_ethdev_tx.h b/drivers/net/ark/ark_ethdev_tx.h
new file mode 100644
index 00000000..8aaafc22
--- /dev/null
+++ b/drivers/net/ark/ark_ethdev_tx.h
@@ -0,0 +1,59 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_ETHDEV_TX_H_
+#define _ARK_ETHDEV_TX_H_
+
+#include <stdint.h>
+
+#include <rte_ethdev.h>
+
+
+uint16_t eth_ark_xmit_pkts_noop(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t eth_ark_xmit_pkts(void *vtxq,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+int eth_ark_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void eth_ark_tx_queue_release(void *vtx_queue);
+int eth_ark_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id);
+int eth_ark_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id);
+void eth_tx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats);
+void eth_tx_queue_stats_reset(void *vqueue);
+
+#endif
diff --git a/drivers/net/ark/ark_ext.h b/drivers/net/ark/ark_ext.h
new file mode 100644
index 00000000..f805f64f
--- /dev/null
+++ b/drivers/net/ark/ark_ext.h
@@ -0,0 +1,115 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_EXT_H_
+#define _ARK_EXT_H_
+
+#include <rte_ethdev.h>
+
+/*
+ * This is the template file for users who wish to define a dynamic
+ * extension to the Arkville PMD. Users who create an extension
+ * should include this file and define the necessary and desired
+ * functions.
+ * Only one function, dev_init(), is required for an extension; all other
+ * functions prototyped in this file are optional.
+ */
+
+/*
+ * Called after PMD initialization.
+ * The implementation returns its private data, which is passed to
+ * all other functions as user_data.
+ * An ARK extension MUST implement this function.
+ */
+void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id);
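+
+/*
+ * Illustrative sketch only (names are hypothetical, not part of this API):
+ * a minimal extension might allocate its private state here and return it,
+ * for example
+ *
+ *	void *dev_init(struct rte_eth_dev *dev, void *a_bar, int port_id)
+ *	{
+ *		struct my_ext_state *s =
+ *			rte_zmalloc("my_ext", sizeof(*s), 0);
+ *		return s;	-- handed back everywhere as user_data
+ *	}
+ */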
+
+/* Called during device shutdown */
+void dev_uninit(struct rte_eth_dev *dev, void *user_data);
+
+/* This call is optional and allows the
+ * extension to specify the number of supported ports.
+ */
+uint8_t dev_get_port_count(struct rte_eth_dev *dev,
+ void *user_data);
+
+/*
+ * The following functions are optional and are directly mapped
+ * from the DPDK PMD ops structure.
+ * Each function, if implemented, is called after the ARK PMD
+ * implementation executes.
+ */
+
+int dev_configure(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_start(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_stop(struct rte_eth_dev *dev,
+ void *user_data);
+
+void dev_close(struct rte_eth_dev *dev,
+ void *user_data);
+
+int link_update(struct rte_eth_dev *dev,
+ int wait_to_complete,
+ void *user_data);
+
+int dev_set_link_up(struct rte_eth_dev *dev,
+ void *user_data);
+
+int dev_set_link_down(struct rte_eth_dev *dev,
+ void *user_data);
+
+void stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats,
+ void *user_data);
+
+void stats_reset(struct rte_eth_dev *dev,
+ void *user_data);
+
+void mac_addr_add(struct rte_eth_dev *dev,
+ struct ether_addr *macadr,
+ uint32_t index,
+ uint32_t pool,
+ void *user_data);
+
+void mac_addr_remove(struct rte_eth_dev *dev,
+ uint32_t index,
+ void *user_data);
+
+void mac_addr_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ void *user_data);
+
+#endif
diff --git a/drivers/net/ark/ark_global.h b/drivers/net/ark/ark_global.h
new file mode 100644
index 00000000..a2e9e8ff
--- /dev/null
+++ b/drivers/net/ark/ark_global.h
@@ -0,0 +1,161 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_GLOBAL_H_
+#define _ARK_GLOBAL_H_
+
+#include <time.h>
+#include <assert.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_version.h>
+
+#include "ark_pktdir.h"
+#include "ark_pktgen.h"
+#include "ark_pktchkr.h"
+
+#define ETH_ARK_ARG_MAXLEN 64
+#define ARK_SYSCTRL_BASE 0x0
+#define ARK_PKTGEN_BASE 0x10000
+#define ARK_MPU_RX_BASE 0x20000
+#define ARK_UDM_BASE 0x30000
+#define ARK_MPU_TX_BASE 0x40000
+#define ARK_DDM_BASE 0x60000
+#define ARK_CMAC_BASE 0x80000
+#define ARK_PKTDIR_BASE 0xa0000
+#define ARK_PKTCHKR_BASE 0x90000
+#define ARK_RCPACING_BASE 0xb0000
+#define ARK_EXTERNAL_BASE 0x100000
+#define ARK_MPU_QOFFSET 0x00100
+#define ARK_MAX_PORTS 8
+
+#define offset8(n) n
+#define offset16(n) ((n) / 2)
+#define offset32(n) ((n) / 4)
+#define offset64(n) ((n) / 8)
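+
+/* For example, offset32(ARK_UDM_BASE) == 0x30000 / 4 == 0xc000, i.e. the
+ * index of that block when BAR 0 is viewed as an array of uint32_t.
+ */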
+
+/* Maximum length of arg list in bytes */
+#define ARK_MAX_ARG_LEN 256
+
+/*
+ * Helper macro declaring a union of differently sized pointers into the
+ * same memory-mapped register block.
+ */
+#define def_ptr(type, name)	\
+ union type { \
+ uint64_t *t64; \
+ uint32_t *t32; \
+ uint16_t *t16; \
+ uint8_t *t8; \
+ void *v; \
+ } name
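+
+/*
+ * For example (illustrative use), "def_ptr(sys_ctrl, sysctrl);" in
+ * struct ark_adapter below declares a member named sysctrl whose
+ * t8/t16/t32/t64/v fields all alias the same BAR address, so registers
+ * can be accessed at any width without casting, e.g.
+ *	ark->sysctrl.t32[4] = 1;	-- hypothetical 32-bit write
+ */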
+
+struct ark_user_ext {
+ void *(*dev_init)(struct rte_eth_dev *, void *abar, int port_id);
+ void (*dev_uninit)(struct rte_eth_dev *, void *);
+ int (*dev_get_port_count)(struct rte_eth_dev *, void *);
+ int (*dev_configure)(struct rte_eth_dev *, void *);
+ int (*dev_start)(struct rte_eth_dev *, void *);
+ void (*dev_stop)(struct rte_eth_dev *, void *);
+ void (*dev_close)(struct rte_eth_dev *, void *);
+ int (*link_update)(struct rte_eth_dev *, int wait_to_complete, void *);
+ int (*dev_set_link_up)(struct rte_eth_dev *, void *);
+ int (*dev_set_link_down)(struct rte_eth_dev *, void *);
+ void (*stats_get)(struct rte_eth_dev *, struct rte_eth_stats *, void *);
+ void (*stats_reset)(struct rte_eth_dev *, void *);
+ void (*mac_addr_add)(struct rte_eth_dev *,
+ struct ether_addr *,
+ uint32_t,
+ uint32_t,
+ void *);
+ void (*mac_addr_remove)(struct rte_eth_dev *, uint32_t, void *);
+ void (*mac_addr_set)(struct rte_eth_dev *, struct ether_addr *, void *);
+};
+
+/*
+ * Structure to store private data for each PF/VF instance.
+ */
+struct ark_adapter {
+ /* User extension private data */
+ void *user_data;
+
+ /* Pointers to packet generator and checker */
+ int start_pg;
+ ark_pkt_gen_t pg;
+ ark_pkt_chkr_t pc;
+ ark_pkt_dir_t pd;
+
+ int num_ports;
+
+ /* Packet generator/checker args */
+ char pkt_gen_args[ARK_MAX_ARG_LEN];
+ char pkt_chkr_args[ARK_MAX_ARG_LEN];
+ uint32_t pkt_dir_v;
+
+ /* eth device */
+ struct rte_eth_dev *eth_dev;
+
+ void *d_handle;
+ struct ark_user_ext user_ext;
+
+ /* Our Bar 0 */
+ uint8_t *bar0;
+
+ /* Application Bar */
+ uint8_t *a_bar;
+
+ /* Arkville demo block offsets */
+ def_ptr(sys_ctrl, sysctrl);
+ def_ptr(pkt_gen, pktgen);
+ def_ptr(mpu_rx, mpurx);
+ def_ptr(UDM, udm);
+ def_ptr(mpu_tx, mputx);
+ def_ptr(DDM, ddm);
+ def_ptr(CMAC, cmac);
+ def_ptr(external, external);
+ def_ptr(pkt_dir, pktdir);
+ def_ptr(pkt_chkr, pktchkr);
+
+ int started;
+ uint16_t rx_queues;
+ uint16_t tx_queues;
+
+ struct ark_rqpace_t *rqpacing;
+};
+
+typedef uint32_t *ark_t;
+
+#endif
diff --git a/drivers/net/ark/ark_logs.h b/drivers/net/ark/ark_logs.h
new file mode 100644
index 00000000..8aff2963
--- /dev/null
+++ b/drivers/net/ark/ark_logs.h
@@ -0,0 +1,119 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_DEBUG_H_
+#define _ARK_DEBUG_H_
+
+#include <inttypes.h>
+#include <rte_log.h>
+
+
+/* Configuration option to pad TX packets to 60 bytes */
+#ifdef RTE_LIBRTE_ARK_PAD_TX
+#define ARK_TX_PAD_TO_60 1
+#else
+#define ARK_TX_PAD_TO_60 0
+#endif
+
+/* System PRIu32/PRIu64 definitions renamed to upper case for local style */
+#define PRIU32 PRIu32
+#define PRIU64 PRIu64
+
+/* Format specifiers for string data pairs */
+#define ARK_SU32 "\n\t%-20s %'20" PRIU32
+#define ARK_SU64 "\n\t%-20s %'20" PRIU64
+#define ARK_SU64X "\n\t%-20s %#20" PRIx64
+#define ARK_SPTR "\n\t%-20s %20p"
+
+
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, fmt, ## args)
+
+/* Conditional trace definitions */
+#define ARK_TRACE_ON(level, fmt, ...) \
+ RTE_LOG(level, PMD, fmt, ##__VA_ARGS__)
+
+/* This pattern lets the compiler type-check arguments even when disabled */
+#define ARK_TRACE_OFF(level, fmt, ...) \
+ do {if (0) RTE_LOG(level, PMD, fmt, ##__VA_ARGS__); } \
+ while (0)
+
+
+/* Tracing that includes the function name */
+#define ARK_FUNC_ON(level, fmt, args...) \
+	RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+
+/* Disabled variant of the function-name trace; arguments are still checked */
+#define ARK_FUNC_OFF(level, fmt, args...) \
+	do { if (0) RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args); } \
+	while (0)
+
+
+/* Debug macros for tracing full behavior: function tracing and messages */
+#ifdef RTE_LIBRTE_ARK_DEBUG_TRACE
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_ON(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_FUNC_LOG(level, fmt, ...) ARK_FUNC_OFF(level, fmt, ##__VA_ARGS__)
+#define PMD_DEBUG_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
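+
+/* Usage sketch (names and values are illustrative only):
+ *	PMD_FUNC_LOG(DEBUG, "queue %u\n", queue_id);
+ *	PMD_DEBUG_LOG(INFO, ARK_SU64 "\n", "Bytes sent", byte_count);
+ * Both compile to type-checked no-ops unless RTE_LIBRTE_ARK_DEBUG_TRACE
+ * is defined.
+ */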
+
+
+/* Debug macro for reporting FPGA statistics */
+#ifdef RTE_LIBRTE_ARK_DEBUG_STATS
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define PMD_STATS_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+
+/* Debug macro for RX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_RX
+#define ARK_RX_DEBUG 1
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_RX_DEBUG 0
+#define PMD_RX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+/* Debug macro for TX path */
+#ifdef RTE_LIBRTE_ARK_DEBUG_TX
+#define ARK_TX_DEBUG 1
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_ON(level, fmt, ##__VA_ARGS__)
+#else
+#define ARK_TX_DEBUG 0
+#define PMD_TX_LOG(level, fmt, ...) ARK_TRACE_OFF(level, fmt, ##__VA_ARGS__)
+#endif
+
+#endif
diff --git a/drivers/net/ark/ark_mpu.c b/drivers/net/ark/ark_mpu.c
new file mode 100644
index 00000000..cd2c0788
--- /dev/null
+++ b/drivers/net/ark/ark_mpu.c
@@ -0,0 +1,181 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_mpu.h"
+
+uint16_t
+ark_api_num_queues(struct ark_mpu_t *mpu)
+{
+ return mpu->hw.num_queues;
+}
+
+uint16_t
+ark_api_num_queues_per_port(struct ark_mpu_t *mpu, uint16_t ark_ports)
+{
+ return mpu->hw.num_queues / ark_ports;
+}
+
+int
+ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size)
+{
+ uint32_t version;
+
+	version = mpu->id.vernum & 0x0000ff00;
+ if ((mpu->id.idnum != 0x2055504d) ||
+ (mpu->hw.obj_size != obj_size) ||
+ (version != 0x00003100)) {
+ PMD_DRV_LOG(ERR,
+ " MPU module not found as expected %08x"
+ " \"%c%c%c%c %c%c%c%c\"\n",
+ mpu->id.idnum,
+ mpu->id.id[0], mpu->id.id[1],
+ mpu->id.id[2], mpu->id.id[3],
+ mpu->id.ver[0], mpu->id.ver[1],
+ mpu->id.ver[2], mpu->id.ver[3]);
+ PMD_DRV_LOG(ERR,
+ " MPU HW num_queues: %u hw_depth %u,"
+ " obj_size: %u, obj_per_mrr: %u"
+ " Expected size %u\n",
+ mpu->hw.num_queues,
+ mpu->hw.hw_depth,
+ mpu->hw.obj_size,
+ mpu->hw.obj_per_mrr,
+ obj_size);
+ return -1;
+ }
+ return 0;
+}
+
+void
+ark_mpu_stop(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_STOP;
+}
+
+void
+ark_mpu_start(struct ark_mpu_t *mpu)
+{
+ mpu->cfg.command = MPU_CMD_RUN;
+}
+
+int
+ark_mpu_reset(struct ark_mpu_t *mpu)
+{
+ int cnt = 0;
+
+ mpu->cfg.command = MPU_CMD_RESET;
+
+ while (mpu->cfg.command != MPU_CMD_IDLE) {
+ if (cnt++ > 1000)
+ break;
+ usleep(10);
+ }
+ if (mpu->cfg.command != MPU_CMD_IDLE) {
+ mpu->cfg.command = MPU_CMD_FORCE_RESET;
+ usleep(10);
+ }
+ ark_mpu_reset_stats(mpu);
+ return mpu->cfg.command != MPU_CMD_IDLE;
+}
+
+void
+ark_mpu_reset_stats(struct ark_mpu_t *mpu)
+{
+ mpu->stats.pci_request = 1; /* reset stats */
+}
+
+int
+ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring, uint32_t ring_size,
+ int is_tx)
+{
+ ark_mpu_reset(mpu);
+
+ if (!rte_is_power_of_2(ring_size)) {
+ PMD_DRV_LOG(ERR, "ARK: Invalid ring size for MPU %d\n",
+ ring_size);
+ return -1;
+ }
+
+ mpu->cfg.ring_base = ring;
+ mpu->cfg.ring_size = ring_size;
+ mpu->cfg.ring_mask = ring_size - 1;
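+	/* a power-of-two ring_size (checked above) lets producer/consumer
+	 * indices wrap with a simple AND against ring_mask
+	 */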
+ mpu->cfg.min_host_move = is_tx ? 1 : mpu->hw.obj_per_mrr;
+ mpu->cfg.min_hw_move = mpu->hw.obj_per_mrr;
+ mpu->cfg.sw_prod_index = 0;
+ mpu->cfg.hw_cons_index = 0;
+ return 0;
+}
+
+void
+ark_mpu_dump(struct ark_mpu_t *mpu, const char *code, uint16_t qid)
+{
+ /* DUMP to see that we have started */
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s Q: %3u sw_prod %u, hw_cons: %u\n",
+ code, qid,
+ mpu->cfg.sw_prod_index, mpu->cfg.hw_cons_index);
+ PMD_DEBUG_LOG(DEBUG, "MPU: %s state: %d count %d, reserved %d"
+ " data 0x%08x_%08x 0x%08x_%08x\n",
+ code,
+ mpu->debug.state, mpu->debug.count,
+ mpu->debug.reserved,
+ mpu->debug.peek[1],
+ mpu->debug.peek[0],
+ mpu->debug.peek[3],
+ mpu->debug.peek[2]
+ );
+ PMD_STATS_LOG(INFO, "MPU: %s Q: %3u"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ code, qid,
+ "PCI Request:", mpu->stats.pci_request,
+ "Queue_empty", mpu->stats.q_empty,
+ "Queue_q1", mpu->stats.q_q1,
+ "Queue_q2", mpu->stats.q_q2,
+ "Queue_q3", mpu->stats.q_q3,
+ "Queue_q4", mpu->stats.q_q4,
+ "Queue_full", mpu->stats.q_full
+ );
+}
+
+void
+ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "MPU Setup Q: %u"
+ ARK_SU64X "\n",
+ q_id,
+ "ring_base", mpu->cfg.ring_base
+ );
+}
diff --git a/drivers/net/ark/ark_mpu.h b/drivers/net/ark/ark_mpu.h
new file mode 100644
index 00000000..a0171dbd
--- /dev/null
+++ b/drivers/net/ark/ark_mpu.h
@@ -0,0 +1,154 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_MPU_H_
+#define _ARK_MPU_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The MPU or Memory Prefetch Unit is an internal Arkville hardware
+ * module for moving data between host memory and the hardware FPGA.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * MPU hardware structures
+ * These are overlay structures for a memory-mapped FPGA device. These
+ * structs are never instantiated in RAM.
+ */
+
+#define ARK_MPU_ID 0x00
+struct ark_mpu_id_t {
+ union {
+ char id[4];
+ uint32_t idnum;
+ };
+ union {
+ char ver[4];
+ uint32_t vernum;
+ };
+ uint32_t phys_id;
+ uint32_t mrr_code;
+};
+
+#define ARK_MPU_HW 0x010
+struct ark_mpu_hw_t {
+ uint16_t num_queues;
+ uint16_t reserved;
+ uint32_t hw_depth;
+ uint32_t obj_size;
+ uint32_t obj_per_mrr;
+};
+
+#define ARK_MPU_CFG 0x040
+struct ark_mpu_cfg_t {
+ phys_addr_t ring_base; /* phys_addr_t is a uint64_t */
+ uint32_t ring_size;
+ uint32_t ring_mask;
+ uint32_t min_host_move;
+ uint32_t min_hw_move;
+ volatile uint32_t sw_prod_index;
+ volatile uint32_t hw_cons_index;
+ volatile uint32_t command;
+};
+enum ARK_MPU_COMMAND {
+ MPU_CMD_IDLE = 1,
+ MPU_CMD_RUN = 2,
+ MPU_CMD_STOP = 4,
+ MPU_CMD_RESET = 8,
+ MPU_CMD_FORCE_RESET = 16,
+	MPU_COMMAND_LIMIT = 0xffffffff
+};
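+/* MPU_COMMAND_LIMIT is assumed to be a sentinel that keeps the enum a full
+ * 32 bits wide so it overlays the hardware command register exactly.
+ */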
+
+#define ARK_MPU_STATS 0x080
+struct ark_mpu_stats_t {
+ volatile uint64_t pci_request;
+ volatile uint64_t q_empty;
+ volatile uint64_t q_q1;
+ volatile uint64_t q_q2;
+ volatile uint64_t q_q3;
+ volatile uint64_t q_q4;
+ volatile uint64_t q_full;
+};
+
+#define ARK_MPU_DEBUG 0x0C0
+struct ark_mpu_debug_t {
+ volatile uint32_t state;
+ uint32_t reserved;
+ volatile uint32_t count;
+ volatile uint32_t take;
+ volatile uint32_t peek[4];
+};
+
+/* Consolidated structure */
+struct ark_mpu_t {
+ struct ark_mpu_id_t id;
+ uint8_t reserved0[(ARK_MPU_HW - ARK_MPU_ID)
+ - sizeof(struct ark_mpu_id_t)];
+ struct ark_mpu_hw_t hw;
+ uint8_t reserved1[(ARK_MPU_CFG - ARK_MPU_HW) -
+ sizeof(struct ark_mpu_hw_t)];
+ struct ark_mpu_cfg_t cfg;
+ uint8_t reserved2[(ARK_MPU_STATS - ARK_MPU_CFG) -
+ sizeof(struct ark_mpu_cfg_t)];
+ struct ark_mpu_stats_t stats;
+ uint8_t reserved3[(ARK_MPU_DEBUG - ARK_MPU_STATS) -
+ sizeof(struct ark_mpu_stats_t)];
+ struct ark_mpu_debug_t debug;
+};
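+
+/* The reserved pads above keep each sub-struct at its hardware offset;
+ * a build-time check could verify this, e.g. (sketch):
+ *	RTE_BUILD_BUG_ON(offsetof(struct ark_mpu_t, cfg) != ARK_MPU_CFG);
+ */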
+
+uint16_t ark_api_num_queues(struct ark_mpu_t *mpu);
+uint16_t ark_api_num_queues_per_port(struct ark_mpu_t *mpu,
+ uint16_t ark_ports);
+int ark_mpu_verify(struct ark_mpu_t *mpu, uint32_t obj_size);
+void ark_mpu_stop(struct ark_mpu_t *mpu);
+void ark_mpu_start(struct ark_mpu_t *mpu);
+int ark_mpu_reset(struct ark_mpu_t *mpu);
+int ark_mpu_configure(struct ark_mpu_t *mpu, phys_addr_t ring,
+ uint32_t ring_size, int is_tx);
+
+void ark_mpu_dump(struct ark_mpu_t *mpu, const char *msg, uint16_t idx);
+void ark_mpu_dump_setup(struct ark_mpu_t *mpu, uint16_t qid);
+void ark_mpu_reset_stats(struct ark_mpu_t *mpu);
+
+/* this action is in a performance critical path */
+static inline void
+ark_mpu_set_producer(struct ark_mpu_t *mpu, uint32_t idx)
+{
+ mpu->cfg.sw_prod_index = idx;
+}
+
+#endif
diff --git a/drivers/net/ark/ark_pktchkr.c b/drivers/net/ark/ark_pktchkr.c
new file mode 100644
index 00000000..62b3673b
--- /dev/null
+++ b/drivers/net/ark/ark_pktchkr.c
@@ -0,0 +1,474 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ark_pktchkr.h"
+#include "ark_logs.h"
+
+static int set_arg(char *arg, char *val);
+static int ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle);
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"port"}, OTINT, {0} },
+ {{"mac-dump"}, OTBOOL, {0} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"stop"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"en_resync"}, OTBOOL, {0} },
+ {{"tuser_err_val"}, OTINT, {1} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTINT, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 10000000000000L},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {60} },
+ {{"pkt_size_min"}, OTINT, {2005} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_chkr_t
+ark_pktchkr_init(void *addr, int ord, int l2_mode)
+{
+ struct ark_pkt_chkr_inst *inst =
+ rte_malloc("ark_pkt_chkr_inst",
+ sizeof(struct ark_pkt_chkr_inst), 0);
+ inst->sregs = (struct ark_pkt_chkr_stat_regs *)addr;
+ inst->cregs =
+ (struct ark_pkt_chkr_ctl_regs *)(((uint8_t *)addr) + 0x100);
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktchkr_uninit(ark_pkt_chkr_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktchkr_run(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->sregs->pkt_start_stop = 0;
+ inst->sregs->pkt_start_stop = 0x1;
+}
+
+int
+ark_pktchkr_stopped(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktchkr_stop(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ int wait_cycle = 10;
+
+ inst->sregs->pkt_start_stop = 0;
+ while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG, "Waiting for pktchk %d to stop...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktchk %d stopped.\n", inst->ordinal);
+}
+
+int
+ark_pktchkr_is_running(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->sregs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+static void
+ark_pktchkr_set_pkt_ctrl(ark_pkt_chkr_t handle,
+ uint32_t gen_forever,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t en_resync,
+ uint32_t tuser_err_val,
+ uint32_t ins_time_stamp)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = (tuser_err_val << 16) | (en_resync << 0);
+
+ inst->sregs->pkt_ctrl = r;
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+ r = ((gen_forever << 24) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+ inst->cregs->pkt_ctrl = r;
+}
+
+static int
+ark_pktchkr_is_gen_forever(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+ uint32_t r = inst->cregs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+int
+ark_pktchkr_wait_done(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ if (ark_pktchkr_is_gen_forever(handle)) {
+ PMD_DEBUG_LOG(ERR, "Pktchk wait_done will not terminate"
+ " because gen_forever=1\n");
+ return -1;
+ }
+ int wait_cycle = 10;
+
+	while (!ark_pktchkr_stopped(handle) && (wait_cycle > 0)) {
+		usleep(1000);
+		wait_cycle--;
+		PMD_DEBUG_LOG(DEBUG, "Waiting for packet checker %d's"
+			      " internal pktgen to finish sending...\n",
+			      inst->ordinal);
+	}
+	PMD_DEBUG_LOG(DEBUG, "Pktchk %d's pktgen done.\n",
+		      inst->ordinal);
+ return 0;
+}
+
+int
+ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ return inst->cregs->pkts_sent;
+}
+
+void
+ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_payload = b;
+}
+
+void
+ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_min = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_max = x;
+}
+
+void
+ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->pkt_size_incr = x;
+}
+
+void
+ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->num_pkts = x;
+}
+
+void
+ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->cregs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ inst->cregs->eth_type = x;
+}
+
+void
+ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->cregs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktchkr_dump_stats(ark_pkt_chkr_t handle)
+{
+ struct ark_pkt_chkr_inst *inst = (struct ark_pkt_chkr_inst *)handle;
+
+ PMD_STATS_LOG(INFO, "pkts_rcvd = (%'u)\n",
+ inst->sregs->pkts_rcvd);
+ PMD_STATS_LOG(INFO, "bytes_rcvd = (%'" PRIU64 ")\n",
+ inst->sregs->bytes_rcvd);
+ PMD_STATS_LOG(INFO, "pkts_ok = (%'u)\n",
+ inst->sregs->pkts_ok);
+ PMD_STATS_LOG(INFO, "pkts_mismatch = (%'u)\n",
+ inst->sregs->pkts_mismatch);
+ PMD_STATS_LOG(INFO, "pkts_err = (%'u)\n",
+ inst->sregs->pkts_err);
+ PMD_STATS_LOG(INFO, "first_mismatch = (%'u)\n",
+ inst->sregs->first_mismatch);
+ PMD_STATS_LOG(INFO, "resync_events = (%'u)\n",
+ inst->sregs->resync_events);
+ PMD_STATS_LOG(INFO, "pkts_missing = (%'u)\n",
+ inst->sregs->pkts_missing);
+ PMD_STATS_LOG(INFO, "min_latency = (%'u)\n",
+ inst->sregs->min_latency);
+ PMD_STATS_LOG(INFO, "max_latency = (%'u)\n",
+ inst->sregs->max_latency);
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+ PMD_DRV_LOG(ERR,
+ "pktchkr: Could not find requested option!, option = %s\n",
+ id);
+ return NULL;
+}
+
+static int
+set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+			o->v.LONG = atoll(val);
+ break;
+ case OTSTRING:
+			strncpy(o->v.STR, val, ARK_MAX_STR_LEN - 1);
+			o->v.STR[ARK_MAX_STR_LEN - 1] = 0;
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,opt_n=v ..."
+ ******/
+void
+ark_pktchkr_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = "=\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
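+
+/* For example (hypothetical argument string), parsing
+ * "configure=1 run=1 num_pkts=1000 dst_ip=10.0.0.1" updates the matching
+ * toptions[] entries, which ark_pktchkr_setup() then applies.  Note that
+ * the separators accepted by strtok() above are '=' and whitespace.
+ */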
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
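+
+/* For example, the default dst_ip "169.254.10.240" parses to 0xa9fe0af0;
+ * a malformed address yields 0.
+ */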
+
+void
+ark_pktchkr_setup(ark_pkt_chkr_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("stop")->v.BOOL && options("configure")->v.BOOL) {
+ ark_pktchkr_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktchkr_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.INT);
+ ark_pktchkr_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+
+ ark_pktchkr_set_eth_type(handle,
+ options("eth_type")->v.INT);
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktchkr_set_hdr_dW(handle, hdr);
+ ark_pktchkr_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktchkr_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktchkr_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktchkr_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktchkr_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("en_resync")->v.BOOL,
+ options("tuser_err_val")->v.INT,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("stop")->v.BOOL)
+ ark_pktchkr_stop(handle);
+
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet checker on port %d\n",
+ options("port")->v.INT);
+ ark_pktchkr_run(handle);
+ }
+}
diff --git a/drivers/net/ark/ark_pktchkr.h b/drivers/net/ark/ark_pktchkr.h
new file mode 100644
index 00000000..f4025dd6
--- /dev/null
+++ b/drivers/net/ark/ark_pktchkr.h
@@ -0,0 +1,117 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTCHKR_H_
+#define _ARK_PKTCHKR_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTCHKR_BASE_ADR 0x90000
+
+typedef void *ark_pkt_chkr_t;
+
+/* The packet checker is an internal Arkville hardware module, which
+ * verifies packet streams generated from the corresponding packet
+ * generator. This module is used for Arkville testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * These are overlay structures for a memory-mapped FPGA device. These
+ * structs are never instantiated in RAM.
+ */
+struct ark_pkt_chkr_stat_regs {
+ uint32_t r0;
+ uint32_t pkt_start_stop;
+ uint32_t pkt_ctrl;
+ uint32_t pkts_rcvd;
+ uint64_t bytes_rcvd;
+ uint32_t pkts_ok;
+ uint32_t pkts_mismatch;
+ uint32_t pkts_err;
+ uint32_t first_mismatch;
+ uint32_t resync_events;
+ uint32_t pkts_missing;
+ uint32_t min_latency;
+ uint32_t max_latency;
+} __attribute__ ((packed));
+
+struct ark_pkt_chkr_ctl_regs {
+ uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ uint32_t num_pkts;
+ uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+} __attribute__ ((packed));
+
+struct ark_pkt_chkr_inst {
+ struct rte_eth_dev_info *dev_info;
+ volatile struct ark_pkt_chkr_stat_regs *sregs;
+ volatile struct ark_pkt_chkr_ctl_regs *cregs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet checker functions */
+ark_pkt_chkr_t ark_pktchkr_init(void *addr, int ord, int l2_mode);
+void ark_pktchkr_uninit(ark_pkt_chkr_t handle);
+void ark_pktchkr_run(ark_pkt_chkr_t handle);
+int ark_pktchkr_stopped(ark_pkt_chkr_t handle);
+void ark_pktchkr_stop(ark_pkt_chkr_t handle);
+int ark_pktchkr_is_running(ark_pkt_chkr_t handle);
+int ark_pktchkr_get_pkts_sent(ark_pkt_chkr_t handle);
+void ark_pktchkr_set_payload_byte(ark_pkt_chkr_t handle, uint32_t b);
+void ark_pktchkr_set_pkt_size_min(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_max(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_pkt_size_incr(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_num_pkts(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_src_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_dst_mac_addr(ark_pkt_chkr_t handle, uint64_t mac_addr);
+void ark_pktchkr_set_eth_type(ark_pkt_chkr_t handle, uint32_t x);
+void ark_pktchkr_set_hdr_dW(ark_pkt_chkr_t handle, uint32_t *hdr);
+void ark_pktchkr_parse(char *args);
+void ark_pktchkr_setup(ark_pkt_chkr_t handle);
+void ark_pktchkr_dump_stats(ark_pkt_chkr_t handle);
+int ark_pktchkr_wait_done(ark_pkt_chkr_t handle);
+
+#endif
diff --git a/drivers/net/ark/ark_pktdir.c b/drivers/net/ark/ark_pktdir.c
new file mode 100644
index 00000000..66e5ce24
--- /dev/null
+++ b/drivers/net/ark/ark_pktdir.c
@@ -0,0 +1,80 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#include "ark_pktdir.h"
+#include "ark_global.h"
+
+
+ark_pkt_dir_t
+ark_pktdir_init(void *base)
+{
+ struct ark_pkt_dir_inst *inst =
+ rte_malloc("ark_pkt_dir_inst",
+ sizeof(struct ark_pkt_dir_inst),
+ 0);
+ inst->regs = (struct ark_pkt_dir_regs *)base;
+ inst->regs->ctrl = 0x00110110; /* POR state */
+ return inst;
+}
+
+void
+ark_pktdir_uninit(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+
+ rte_free(inst);
+}
+
+void
+ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ inst->regs->ctrl = v;
+}
+
+uint32_t
+ark_pktdir_status(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->ctrl;
+}
+
+uint32_t
+ark_pktdir_stall_cnt(ark_pkt_dir_t handle)
+{
+ struct ark_pkt_dir_inst *inst = (struct ark_pkt_dir_inst *)handle;
+ return inst->regs->stall_cnt;
+}
diff --git a/drivers/net/ark/ark_pktdir.h b/drivers/net/ark/ark_pktdir.h
new file mode 100644
index 00000000..e13fe821
--- /dev/null
+++ b/drivers/net/ark/ark_pktdir.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTDIR_H_
+#define _ARK_PKTDIR_H_
+
+#include <stdint.h>
+
+#define ARK_PKTDIR_BASE_ADR 0xa0000
+
+typedef void *ark_pkt_dir_t;
+
+
+/* The packet director is an internal Arkville hardware module for
+ * directing packet data in non-typical flows, such as testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structure for a memory-mapped FPGA device. These
+ * structs are never instantiated in RAM.
+ */
+struct ark_pkt_dir_regs {
+ uint32_t ctrl;
+ uint32_t status;
+ uint32_t stall_cnt;
+} __attribute__ ((packed));
+
+struct ark_pkt_dir_inst {
+ volatile struct ark_pkt_dir_regs *regs;
+};
+
+ark_pkt_dir_t ark_pktdir_init(void *base);
+void ark_pktdir_uninit(ark_pkt_dir_t handle);
+void ark_pktdir_setup(ark_pkt_dir_t handle, uint32_t v);
+uint32_t ark_pktdir_stall_cnt(ark_pkt_dir_t handle);
+uint32_t ark_pktdir_status(ark_pkt_dir_t handle);
+
+#endif
diff --git a/drivers/net/ark/ark_pktgen.c b/drivers/net/ark/ark_pktgen.c
new file mode 100644
index 00000000..bdac054e
--- /dev/null
+++ b/drivers/net/ark/ark_pktgen.c
@@ -0,0 +1,496 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <getopt.h>
+#include <sys/time.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include <rte_eal.h>
+
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "ark_pktgen.h"
+#include "ark_logs.h"
+
+#define ARK_MAX_STR_LEN 64
+union OPTV {
+ int INT;
+ int BOOL;
+ uint64_t LONG;
+ char STR[ARK_MAX_STR_LEN];
+};
+
+enum OPTYPE {
+ OTINT,
+ OTLONG,
+ OTBOOL,
+ OTSTRING
+};
+
+struct OPTIONS {
+ char opt[ARK_MAX_STR_LEN];
+ enum OPTYPE t;
+ union OPTV v;
+};
+
+static struct OPTIONS toptions[] = {
+ {{"configure"}, OTBOOL, {1} },
+ {{"dg-mode"}, OTBOOL, {1} },
+ {{"run"}, OTBOOL, {0} },
+ {{"pause"}, OTBOOL, {0} },
+ {{"reset"}, OTBOOL, {0} },
+ {{"dump"}, OTBOOL, {0} },
+ {{"gen_forever"}, OTBOOL, {0} },
+ {{"en_slaved_start"}, OTBOOL, {0} },
+ {{"vary_length"}, OTBOOL, {0} },
+ {{"incr_payload"}, OTBOOL, {0} },
+ {{"incr_first_byte"}, OTBOOL, {0} },
+ {{"ins_seq_num"}, OTBOOL, {0} },
+ {{"ins_time_stamp"}, OTBOOL, {1} },
+ {{"ins_udp_hdr"}, OTBOOL, {0} },
+ {{"num_pkts"}, OTLONG, .v.LONG = 100000000},
+ {{"payload_byte"}, OTINT, {0x55} },
+ {{"pkt_spacing"}, OTINT, {130} },
+ {{"pkt_size_min"}, OTINT, {2006} },
+ {{"pkt_size_max"}, OTINT, {1514} },
+ {{"pkt_size_incr"}, OTINT, {1} },
+ {{"eth_type"}, OTINT, {0x0800} },
+ {{"src_mac_addr"}, OTLONG, .v.LONG = 0xdC3cF6425060L},
+ {{"dst_mac_addr"}, OTLONG, .v.LONG = 0x112233445566L},
+ {{"hdr_dW0"}, OTINT, {0x0016e319} },
+ {{"hdr_dW1"}, OTINT, {0x27150004} },
+ {{"hdr_dW2"}, OTINT, {0x76967bda} },
+ {{"hdr_dW3"}, OTINT, {0x08004500} },
+ {{"hdr_dW4"}, OTINT, {0x005276ed} },
+ {{"hdr_dW5"}, OTINT, {0x40004006} },
+ {{"hdr_dW6"}, OTINT, {0x56cfc0a8} },
+ {{"start_offset"}, OTINT, {0} },
+ {{"bytes_per_cycle"}, OTINT, {10} },
+ {{"shaping"}, OTBOOL, {0} },
+ {{"dst_ip"}, OTSTRING, .v.STR = "169.254.10.240"},
+ {{"dst_port"}, OTINT, {65536} },
+ {{"src_port"}, OTINT, {65536} },
+};
+
+ark_pkt_gen_t
+ark_pktgen_init(void *adr, int ord, int l2_mode)
+{
+ struct ark_pkt_gen_inst *inst =
+ rte_malloc("ark_pkt_gen_inst_pmd",
+ sizeof(struct ark_pkt_gen_inst), 0);
+ inst->regs = (struct ark_pkt_gen_regs *)adr;
+ inst->ordinal = ord;
+ inst->l2_mode = l2_mode;
+ return inst;
+}
+
+void
+ark_pktgen_uninit(ark_pkt_gen_t handle)
+{
+ rte_free(handle);
+}
+
+void
+ark_pktgen_run(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->pkt_start_stop = 1;
+}
+
+uint32_t
+ark_pktgen_paused(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 16) & 1) == 1);
+}
+
+void
+ark_pktgen_pause(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int cnt = 0;
+
+ inst->regs->pkt_start_stop = 0;
+
+ while (!ark_pktgen_paused(handle)) {
+ usleep(1000);
+ if (cnt++ > 100) {
+ PMD_DRV_LOG(ERR, "Pktgen %d failed to pause.\n",
+ inst->ordinal);
+ break;
+ }
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d paused.\n", inst->ordinal);
+}
+
+void
+ark_pktgen_reset(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d is not running"
+ " and is not paused. No need to reset.\n",
+ inst->ordinal);
+ return;
+ }
+
+ if (ark_pktgen_is_running(handle) &&
+ !ark_pktgen_paused(handle)) {
+ PMD_DEBUG_LOG(DEBUG,
+ "Pktgen %d is not paused. Pausing first.\n",
+ inst->ordinal);
+ ark_pktgen_pause(handle);
+ }
+
+ PMD_DEBUG_LOG(DEBUG, "Resetting pktgen %d.\n", inst->ordinal);
+ inst->regs->pkt_start_stop = (1 << 8);
+}
+
+uint32_t
+ark_pktgen_tx_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_running(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_start_stop;
+
+ return ((r & 1) == 1);
+}
+
+uint32_t
+ark_pktgen_is_gen_forever(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ uint32_t r = inst->regs->pkt_ctrl;
+
+ return (((r >> 24) & 1) == 1);
+}
+
+void
+ark_pktgen_wait_done(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ int wait_cycle = 10;
+
+ if (ark_pktgen_is_gen_forever(handle))
+ PMD_DRV_LOG(ERR, "Pktgen wait_done will not terminate"
+ " because gen_forever=1\n");
+
+ while (!ark_pktgen_tx_done(handle) && (wait_cycle > 0)) {
+ usleep(1000);
+ wait_cycle--;
+ PMD_DEBUG_LOG(DEBUG,
+ "Waiting for pktgen %d to finish sending...\n",
+ inst->ordinal);
+ }
+ PMD_DEBUG_LOG(DEBUG, "Pktgen %d done.\n", inst->ordinal);
+}
+
+uint32_t
+ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ return inst->regs->pkts_sent;
+}
+
+void
+ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_payload = b;
+}
+
+void
+ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_spacing = x;
+}
+
+void
+ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_min = x;
+}
+
+void
+ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_max = x;
+}
+
+void
+ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->pkt_size_incr = x;
+}
+
+void
+ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->num_pkts = x;
+}
+
+void
+ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->src_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->src_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->dst_mac_addr_h = (mac_addr >> 32) & 0xffff;
+ inst->regs->dst_mac_addr_l = mac_addr & 0xffffffff;
+}
+
+void
+ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+ inst->regs->eth_type = x;
+}
+
+void
+ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr)
+{
+ uint32_t i;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ for (i = 0; i < 7; i++)
+ inst->regs->hdr_dw[i] = hdr[i];
+}
+
+void
+ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x)
+{
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ inst->regs->start_offset = x;
+}
+
+static struct OPTIONS *
+options(const char *id)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(toptions) / sizeof(struct OPTIONS); i++) {
+ if (strcmp(id, toptions[i].opt) == 0)
+ return &toptions[i];
+ }
+
+ PMD_DRV_LOG(ERR,
+ "Pktgen: Could not find requested option!, "
+ "option = %s\n",
+ id
+ );
+ return NULL;
+}
+
+static int pmd_set_arg(char *arg, char *val);
+static int
+pmd_set_arg(char *arg, char *val)
+{
+ struct OPTIONS *o = options(arg);
+
+ if (o) {
+ switch (o->t) {
+ case OTINT:
+ case OTBOOL:
+ o->v.INT = atoi(val);
+ break;
+ case OTLONG:
+			o->v.LONG = atoll(val);
+ break;
+ case OTSTRING:
+			strncpy(o->v.STR, val, ARK_MAX_STR_LEN - 1);
+			o->v.STR[ARK_MAX_STR_LEN - 1] = 0;
+ break;
+ }
+ return 1;
+ }
+ return 0;
+}
+
+/******
+ * Arg format = "opt0=v,opt_n=v ..."
+ ******/
+void
+ark_pktgen_parse(char *args)
+{
+ char *argv, *v;
+ const char toks[] = " =\n\t\v\f \r";
+ argv = strtok(args, toks);
+ v = strtok(NULL, toks);
+ while (argv && v) {
+ pmd_set_arg(argv, v);
+ argv = strtok(NULL, toks);
+ v = strtok(NULL, toks);
+ }
+}
+
+static int32_t parse_ipv4_string(char const *ip_address);
+static int32_t
+parse_ipv4_string(char const *ip_address)
+{
+ unsigned int ip[4];
+
+ if (sscanf(ip_address, "%u.%u.%u.%u",
+ &ip[0], &ip[1], &ip[2], &ip[3]) != 4)
+ return 0;
+ return ip[3] + ip[2] * 0x100 + ip[1] * 0x10000ul + ip[0] * 0x1000000ul;
+}
+
+static void
+ark_pktgen_set_pkt_ctrl(ark_pkt_gen_t handle,
+ uint32_t gen_forever,
+ uint32_t en_slaved_start,
+ uint32_t vary_length,
+ uint32_t incr_payload,
+ uint32_t incr_first_byte,
+ uint32_t ins_seq_num,
+ uint32_t ins_udp_hdr,
+ uint32_t ins_time_stamp)
+{
+ uint32_t r;
+ struct ark_pkt_gen_inst *inst = (struct ark_pkt_gen_inst *)handle;
+
+ if (!inst->l2_mode)
+ ins_udp_hdr = 0;
+
+ r = ((gen_forever << 24) |
+ (en_slaved_start << 20) |
+ (vary_length << 16) |
+ (incr_payload << 12) |
+ (incr_first_byte << 8) |
+ (ins_time_stamp << 5) |
+ (ins_seq_num << 4) |
+ ins_udp_hdr);
+
+ inst->regs->bytes_per_cycle = options("bytes_per_cycle")->v.INT;
+ if (options("shaping")->v.BOOL)
+ r = r | (1 << 28); /* enable shaping */
+
+ inst->regs->pkt_ctrl = r;
+}
+
+void
+ark_pktgen_setup(ark_pkt_gen_t handle)
+{
+ uint32_t hdr[7];
+ int32_t dst_ip = parse_ipv4_string(options("dst_ip")->v.STR);
+
+ if (!options("pause")->v.BOOL &&
+ (!options("reset")->v.BOOL &&
+ (options("configure")->v.BOOL))) {
+ ark_pktgen_set_payload_byte(handle,
+ options("payload_byte")->v.INT);
+ ark_pktgen_set_src_mac_addr(handle,
+ options("src_mac_addr")->v.INT);
+ ark_pktgen_set_dst_mac_addr(handle,
+ options("dst_mac_addr")->v.LONG);
+ ark_pktgen_set_eth_type(handle,
+ options("eth_type")->v.INT);
+
+ if (options("dg-mode")->v.BOOL) {
+ hdr[0] = options("hdr_dW0")->v.INT;
+ hdr[1] = options("hdr_dW1")->v.INT;
+ hdr[2] = options("hdr_dW2")->v.INT;
+ hdr[3] = options("hdr_dW3")->v.INT;
+ hdr[4] = options("hdr_dW4")->v.INT;
+ hdr[5] = options("hdr_dW5")->v.INT;
+ hdr[6] = options("hdr_dW6")->v.INT;
+ } else {
+ hdr[0] = dst_ip;
+ hdr[1] = options("dst_port")->v.INT;
+ hdr[2] = options("src_port")->v.INT;
+ hdr[3] = 0;
+ hdr[4] = 0;
+ hdr[5] = 0;
+ hdr[6] = 0;
+ }
+ ark_pktgen_set_hdr_dW(handle, hdr);
+ ark_pktgen_set_num_pkts(handle,
+ options("num_pkts")->v.INT);
+ ark_pktgen_set_pkt_size_min(handle,
+ options("pkt_size_min")->v.INT);
+ ark_pktgen_set_pkt_size_max(handle,
+ options("pkt_size_max")->v.INT);
+ ark_pktgen_set_pkt_size_incr(handle,
+ options("pkt_size_incr")->v.INT);
+ ark_pktgen_set_pkt_spacing(handle,
+ options("pkt_spacing")->v.INT);
+ ark_pktgen_set_start_offset(handle,
+ options("start_offset")->v.INT);
+ ark_pktgen_set_pkt_ctrl(handle,
+ options("gen_forever")->v.BOOL,
+ options("en_slaved_start")->v.BOOL,
+ options("vary_length")->v.BOOL,
+ options("incr_payload")->v.BOOL,
+ options("incr_first_byte")->v.BOOL,
+ options("ins_seq_num")->v.INT,
+ options("ins_udp_hdr")->v.BOOL,
+ options("ins_time_stamp")->v.INT);
+ }
+
+ if (options("pause")->v.BOOL)
+ ark_pktgen_pause(handle);
+
+ if (options("reset")->v.BOOL)
+ ark_pktgen_reset(handle);
+ if (options("run")->v.BOOL) {
+ PMD_DEBUG_LOG(DEBUG, "Starting packet generator on port %d\n",
+ options("port")->v.INT);
+ ark_pktgen_run(handle);
+ }
+}
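Taken together, the expected sequence is parse, then init, then setup. The driver's real call sites live in ark_ethdev.c (earlier in this patch) and are not reproduced here; the sketch below is an assumption about that wiring, with bar_addr standing in for the mapped FPGA BAR and the ARK_PKTGEN_BASE_ADR arithmetic shown only for illustration.

#include <stdint.h>
#include "ark_pktgen.h"

static void pktgen_flow_example(void *bar_addr, char *devargs)
{
	ark_pkt_gen_t pg;

	ark_pktgen_parse(devargs);	/* fill the static option table */
	pg = ark_pktgen_init((uint8_t *)bar_addr + ARK_PKTGEN_BASE_ADR,
			     0 /* ordinal */, 1 /* l2_mode */);
	ark_pktgen_setup(pg);	/* program registers, then pause/reset/run */
}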
diff --git a/drivers/net/ark/ark_pktgen.h b/drivers/net/ark/ark_pktgen.h
new file mode 100644
index 00000000..bf5a241b
--- /dev/null
+++ b/drivers/net/ark/ark_pktgen.h
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_PKTGEN_H_
+#define _ARK_PKTGEN_H_
+
+#include <stdint.h>
+#include <inttypes.h>
+
+#define ARK_PKTGEN_BASE_ADR 0x10000
+
+typedef void *ark_pkt_gen_t;
+
+/* The packet generator is an internal Arkville hardware module, which
+ * generates known packets for use in integrity and line-rate testing.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * This is an overlay structure for a memory-mapped FPGA device. These
+ * structs are never instantiated in RAM.
+ */
+struct ark_pkt_gen_regs {
+ uint32_t r0;
+ volatile uint32_t pkt_start_stop;
+ volatile uint32_t pkt_ctrl;
+ uint32_t pkt_payload;
+ uint32_t pkt_spacing;
+ uint32_t pkt_size_min;
+ uint32_t pkt_size_max;
+ uint32_t pkt_size_incr;
+ volatile uint32_t num_pkts;
+ volatile uint32_t pkts_sent;
+ uint32_t src_mac_addr_l;
+ uint32_t src_mac_addr_h;
+ uint32_t dst_mac_addr_l;
+ uint32_t dst_mac_addr_h;
+ uint32_t eth_type;
+ uint32_t hdr_dw[7];
+ uint32_t start_offset;
+ uint32_t bytes_per_cycle;
+} __attribute__ ((packed));
+
+struct ark_pkt_gen_inst {
+ struct rte_eth_dev_info *dev_info;
+ struct ark_pkt_gen_regs *regs;
+ int l2_mode;
+ int ordinal;
+};
+
+/* packet generator functions */
+ark_pkt_gen_t ark_pktgen_init(void *arg, int ord, int l2_mode);
+void ark_pktgen_uninit(ark_pkt_gen_t handle);
+void ark_pktgen_run(ark_pkt_gen_t handle);
+void ark_pktgen_pause(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_paused(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_gen_forever(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_is_running(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_tx_done(ark_pkt_gen_t handle);
+void ark_pktgen_reset(ark_pkt_gen_t handle);
+void ark_pktgen_wait_done(ark_pkt_gen_t handle);
+uint32_t ark_pktgen_get_pkts_sent(ark_pkt_gen_t handle);
+void ark_pktgen_set_payload_byte(ark_pkt_gen_t handle, uint32_t b);
+void ark_pktgen_set_pkt_spacing(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_min(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_max(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_pkt_size_incr(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_num_pkts(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_src_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_dst_mac_addr(ark_pkt_gen_t handle, uint64_t mac_addr);
+void ark_pktgen_set_eth_type(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_set_hdr_dW(ark_pkt_gen_t handle, uint32_t *hdr);
+void ark_pktgen_set_start_offset(ark_pkt_gen_t handle, uint32_t x);
+void ark_pktgen_parse(char *argv);
+void ark_pktgen_setup(ark_pkt_gen_t handle);
+
+#endif
diff --git a/drivers/net/ark/ark_rqp.c b/drivers/net/ark/ark_rqp.c
new file mode 100644
index 00000000..41c497b0
--- /dev/null
+++ b/drivers/net/ark/ark_rqp.c
@@ -0,0 +1,97 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_rqp.h"
+#include "ark_logs.h"
+
+/* ************************************************************************* */
+void
+ark_rqp_stats_reset(struct ark_rqpace_t *rqp)
+{
+ rqp->stats_clear = 1;
+ /* POR 992 */
+ /* rqp->cpld_max = 992; */
+ /* POR 64 */
+ /* rqp->cplh_max = 64; */
+}
+
+/* ************************************************************************* */
+void
+ark_rqp_dump(struct ark_rqpace_t *rqp)
+{
+ if (rqp->err_count_other != 0)
+ PMD_DRV_LOG(ERR,
+ "RQP Errors noted: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other);
+
+ PMD_STATS_LOG(INFO, "RQP Dump: ctrl: %d cplh_hmax %d cpld_max %d"
+ ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
+ rqp->ctrl, rqp->cplh_max, rqp->cpld_max,
+ "Error Count", rqp->err_cnt,
+ "Error General", rqp->err_count_other,
+ "stall_pS", rqp->stall_ps,
+ "stall_pS Min", rqp->stall_ps_min,
+ "stall_pS Max", rqp->stall_ps_max,
+ "req_pS", rqp->req_ps,
+ "req_pS Min", rqp->req_ps_min,
+ "req_pS Max", rqp->req_ps_max,
+ "req_dWPS", rqp->req_dw_ps,
+ "req_dWPS Min", rqp->req_dw_ps_min,
+ "req_dWPS Max", rqp->req_dw_ps_max,
+ "cpl_pS", rqp->cpl_ps,
+ "cpl_pS Min", rqp->cpl_ps_min,
+ "cpl_pS Max", rqp->cpl_ps_max,
+ "cpl_dWPS", rqp->cpl_dw_ps,
+ "cpl_dWPS Min", rqp->cpl_dw_ps_min,
+ "cpl_dWPS Max", rqp->cpl_dw_ps_max,
+ "cplh pending", rqp->cplh_pending,
+ "cpld pending", rqp->cpld_pending,
+ "cplh pending max", rqp->cplh_pending_max,
+ "cpld pending max", rqp->cpld_pending_max);
+}
+
+int
+ark_rqp_lasped(struct ark_rqpace_t *rqp)
+{
+ return rqp->lasped;
+}
diff --git a/drivers/net/ark/ark_rqp.h b/drivers/net/ark/ark_rqp.h
new file mode 100644
index 00000000..0c380071
--- /dev/null
+++ b/drivers/net/ark/ark_rqp.h
@@ -0,0 +1,86 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_RQP_H_
+#define _ARK_RQP_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The RQP or ReQuest Pacer is an internal Arkville hardware module
+ * which limits the PCIE data flow to ensure correct operation for the
+ * particular hardware PCIE endpoint.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/*
+ * RQ Pacing core hardware structure
+ * This is an overlay structure for a memory-mapped FPGA device. These
+ * structs are never instantiated in RAM.
+ */
+struct ark_rqpace_t {
+ volatile uint32_t ctrl;
+ volatile uint32_t stats_clear;
+ volatile uint32_t cplh_max;
+ volatile uint32_t cpld_max;
+ volatile uint32_t err_cnt;
+ volatile uint32_t stall_ps;
+ volatile uint32_t stall_ps_min;
+ volatile uint32_t stall_ps_max;
+ volatile uint32_t req_ps;
+ volatile uint32_t req_ps_min;
+ volatile uint32_t req_ps_max;
+ volatile uint32_t req_dw_ps;
+ volatile uint32_t req_dw_ps_min;
+ volatile uint32_t req_dw_ps_max;
+ volatile uint32_t cpl_ps;
+ volatile uint32_t cpl_ps_min;
+ volatile uint32_t cpl_ps_max;
+ volatile uint32_t cpl_dw_ps;
+ volatile uint32_t cpl_dw_ps_min;
+ volatile uint32_t cpl_dw_ps_max;
+ volatile uint32_t cplh_pending;
+ volatile uint32_t cpld_pending;
+ volatile uint32_t cplh_pending_max;
+ volatile uint32_t cpld_pending_max;
+ volatile uint32_t err_count_other;
+ char eval[4];
+ volatile int lasped;
+};
+
+void ark_rqp_dump(struct ark_rqpace_t *rqp);
+void ark_rqp_stats_reset(struct ark_rqpace_t *rqp);
+int ark_rqp_lasped(struct ark_rqpace_t *rqp);
+#endif
diff --git a/drivers/net/ark/ark_udm.c b/drivers/net/ark/ark_udm.c
new file mode 100644
index 00000000..1ba7d26d
--- /dev/null
+++ b/drivers/net/ark/ark_udm.c
@@ -0,0 +1,226 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+
+#include "ark_logs.h"
+#include "ark_udm.h"
+
+int
+ark_udm_verify(struct ark_udm_t *udm)
+{
+ if (sizeof(struct ark_udm_t) != ARK_UDM_EXPECT_SIZE) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM structure looks incorrect %d vs %zd\n",
+ ARK_UDM_EXPECT_SIZE, sizeof(struct ark_udm_t));
+ return -1;
+ }
+
+ if (udm->setup.const0 != ARK_UDM_CONST) {
+ PMD_DRV_LOG(ERR,
+ "ARK: UDM module not found as expected 0x%08x\n",
+ udm->setup.const0);
+ return -1;
+ }
+ return 0;
+}
+
+int
+ark_udm_stop(struct ark_udm_t *udm, const int wait)
+{
+ int cnt = 0;
+
+ udm->cfg.command = 2;
+
+ while (wait && (udm->cfg.stop_flushed & 0x01) == 0) {
+ if (cnt++ > 1000)
+ return 1;
+
+ usleep(10);
+ }
+ return 0;
+}
+
+int
+ark_udm_reset(struct ark_udm_t *udm)
+{
+ int status;
+
+ status = ark_udm_stop(udm, 1);
+ if (status != 0) {
+ PMD_DEBUG_LOG(INFO, "%s stop failed doing forced reset\n",
+ __func__);
+ udm->cfg.command = 4;
+ usleep(10);
+ udm->cfg.command = 3;
+ status = ark_udm_stop(udm, 0);
+ PMD_DEBUG_LOG(INFO, "%s stop status %d post failure"
+ " and forced reset\n",
+ __func__, status);
+ } else {
+ udm->cfg.command = 3;
+ }
+
+ return status;
+}
+
+void
+ark_udm_start(struct ark_udm_t *udm)
+{
+ udm->cfg.command = 1;
+}
+
+void
+ark_udm_stats_reset(struct ark_udm_t *udm)
+{
+ udm->pcibp.pci_clear = 1;
+ udm->tlp_ps.tlp_clear = 1;
+}
+
+void
+ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns)
+{
+ /* headroom and data room are in DWords in the UDM */
+ udm->cfg.dataroom = dataroom / 4;
+ udm->cfg.headroom = headroom / 4;
+
+ /* convert the write interval from ns to 4 ns cycles */
+ udm->rt_cfg.write_interval = write_interval_ns / 4;
+}
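As the comments note, both room values are stored as DWord counts and the write interval in 4 ns cycles. The sketch below is only an illustration: ARK_RX_WRITE_TIME_NS and RTE_PKTMBUF_HEADROOM are plausible arguments given the constants in this patch, but the 2 KB dataroom is made up and the real call site (in the RX path) is not shown here.

#include <rte_mbuf.h>
#include "ark_udm.h"

static void udm_configure_example(struct ark_udm_t *udm)
{
	ark_udm_configure(udm,
			  RTE_PKTMBUF_HEADROOM,		/* stored as headroom / 4 */
			  2048,				/* stored as dataroom / 4 */
			  ARK_RX_WRITE_TIME_NS);	/* stored as ns / 4 */
}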
+
+void
+ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr)
+{
+ udm->rt_cfg.hw_prod_addr = addr;
+}
+
+int
+ark_udm_is_flushed(struct ark_udm_t *udm)
+{
+ return (udm->cfg.stop_flushed & 0x01) != 0;
+}
+
+uint64_t
+ark_udm_dropped(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_pkt_drop;
+}
+
+uint64_t
+ark_udm_bytes(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_byte_count;
+}
+
+uint64_t
+ark_udm_packets(struct ark_udm_t *udm)
+{
+ return udm->qstats.q_ff_packet_count;
+}
+
+void
+ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_STATS_LOG(INFO, "UDM Stats: %s"
+ ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 ARK_SU64 "\n",
+ msg,
+ "Pkts Received", udm->stats.rx_packet_count,
+ "Pkts Finalized", udm->stats.rx_sent_packets,
+ "Pkts Dropped", udm->tlp.pkt_drop,
+ "Bytes Count", udm->stats.rx_byte_count,
+ "MBuf Count", udm->stats.rx_mbuf_count);
+}
+
+void
+ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg, uint16_t qid)
+{
+ PMD_STATS_LOG(INFO, "UDM Queue %3u Stats: %s"
+ ARK_SU64 ARK_SU64
+ ARK_SU64 ARK_SU64
+ ARK_SU64 "\n",
+ qid, msg,
+ "Pkts Received", udm->qstats.q_packet_count,
+ "Pkts Finalized", udm->qstats.q_ff_packet_count,
+ "Pkts Dropped", udm->qstats.q_pkt_drop,
+ "Bytes Count", udm->qstats.q_byte_count,
+ "MBuf Count", udm->qstats.q_mbuf_count);
+}
+
+void
+ark_udm_dump(struct ark_udm_t *udm, const char *msg)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Dump: %s Stopped: %d\n", msg,
+ udm->cfg.stop_flushed);
+}
+
+void
+ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id)
+{
+ PMD_DEBUG_LOG(DEBUG, "UDM Setup Q: %u"
+ ARK_SU64X ARK_SU32 "\n",
+ q_id,
+ "hw_prod_addr", udm->rt_cfg.hw_prod_addr,
+ "prod_idx", udm->rt_cfg.prod_idx);
+}
+
+void
+ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg)
+{
+ struct ark_udm_pcibp_t *bp = &udm->pcibp;
+
+ PMD_STATS_LOG(INFO, "UDM Performance %s"
+ ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32
+ "\n",
+ msg,
+ "PCI Empty", bp->pci_empty,
+ "PCI Q1", bp->pci_q1,
+ "PCI Q2", bp->pci_q2,
+ "PCI Q3", bp->pci_q3,
+ "PCI Q4", bp->pci_q4,
+ "PCI Full", bp->pci_full);
+}
+
+void
+ark_udm_queue_stats_reset(struct ark_udm_t *udm)
+{
+ udm->qstats.q_byte_count = 1;
+}
+
+void
+ark_udm_queue_enable(struct ark_udm_t *udm, int enable)
+{
+ udm->qstats.q_enable = enable ? 1 : 0;
+}
diff --git a/drivers/net/ark/ark_udm.h b/drivers/net/ark/ark_udm.h
new file mode 100644
index 00000000..29bf1e8f
--- /dev/null
+++ b/drivers/net/ark/ark_udm.h
@@ -0,0 +1,192 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2017 Atomic Rules LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ARK_UDM_H_
+#define _ARK_UDM_H_
+
+#include <stdint.h>
+
+#include <rte_memory.h>
+
+/* The UDM or Upstream Data Mover is an internal Arkville hardware
+ * module for moving packets from the RX packet streams to host memory.
+ * This module is *not* intended for end-user manipulation, hence
+ * there is minimal documentation.
+ */
+
+/* Metadata structure passed from the FPGA; must match the FPGA layout */
+struct ark_rx_meta {
+ uint64_t timestamp;
+ uint64_t user_data;
+ uint8_t port;
+ uint8_t dst_queue;
+ uint16_t pkt_len;
+};
+
+/*
+ * UDM hardware structures
+ * These are overlay structures for a memory-mapped FPGA device. These
+ * structs are never instantiated in RAM.
+ */
+
+#define ARK_RX_WRITE_TIME_NS 2500
+#define ARK_UDM_SETUP 0
+#define ARK_UDM_CONST 0xbACECACE
+struct ark_udm_setup_t {
+ uint32_t r0;
+ uint32_t r4;
+ volatile uint32_t cycle_count;
+ uint32_t const0;
+};
+
+#define ARK_UDM_CFG 0x010
+struct ark_udm_cfg_t {
+ volatile uint32_t stop_flushed; /* RO */
+ volatile uint32_t command;
+ uint32_t dataroom;
+ uint32_t headroom;
+};
+
+typedef enum {
+ ARK_UDM_START = 0x1,
+ ARK_UDM_STOP = 0x2,
+ ARK_UDM_RESET = 0x3
+} ark_udm_commands;
+
+#define ARK_UDM_STATS 0x020
+struct ark_udm_stats_t {
+ volatile uint64_t rx_byte_count;
+ volatile uint64_t rx_packet_count;
+ volatile uint64_t rx_mbuf_count;
+ volatile uint64_t rx_sent_packets;
+};
+
+#define ARK_UDM_PQ 0x040
+struct ark_udm_queue_stats_t {
+ volatile uint64_t q_byte_count;
+ volatile uint64_t q_packet_count; /* includes drops */
+ volatile uint64_t q_mbuf_count;
+ volatile uint64_t q_ff_packet_count;
+ volatile uint64_t q_pkt_drop;
+ uint32_t q_enable;
+};
+
+#define ARK_UDM_TLP 0x0070
+struct ark_udm_tlp_t {
+ volatile uint64_t pkt_drop; /* global */
+ volatile uint32_t tlp_q1;
+ volatile uint32_t tlp_q2;
+ volatile uint32_t tlp_q3;
+ volatile uint32_t tlp_q4;
+ volatile uint32_t tlp_full;
+};
+
+#define ARK_UDM_PCIBP 0x00a0
+struct ark_udm_pcibp_t {
+ volatile uint32_t pci_clear;
+ volatile uint32_t pci_empty;
+ volatile uint32_t pci_q1;
+ volatile uint32_t pci_q2;
+ volatile uint32_t pci_q3;
+ volatile uint32_t pci_q4;
+ volatile uint32_t pci_full;
+};
+
+#define ARK_UDM_TLP_PS 0x00bc
+struct ark_udm_tlp_ps_t {
+ volatile uint32_t tlp_clear;
+ volatile uint32_t tlp_ps_min;
+ volatile uint32_t tlp_ps_max;
+ volatile uint32_t tlp_full_ps_min;
+ volatile uint32_t tlp_full_ps_max;
+ volatile uint32_t tlp_dw_ps_min;
+ volatile uint32_t tlp_dw_ps_max;
+ volatile uint32_t tlp_pldw_ps_min;
+ volatile uint32_t tlp_pldw_ps_max;
+};
+
+#define ARK_UDM_RT_CFG 0x00e0
+struct ark_udm_rt_cfg_t {
+ phys_addr_t hw_prod_addr;
+ uint32_t write_interval; /* 4ns cycles */
+ volatile uint32_t prod_idx; /* RO */
+};
+
+/* Consolidated structure */
+#define ARK_UDM_EXPECT_SIZE (0x00fc + 4)
+#define ARK_UDM_QOFFSET ARK_UDM_EXPECT_SIZE
+struct ark_udm_t {
+ struct ark_udm_setup_t setup;
+ struct ark_udm_cfg_t cfg;
+ struct ark_udm_stats_t stats;
+ struct ark_udm_queue_stats_t qstats;
+ uint8_t reserved1[(ARK_UDM_TLP - ARK_UDM_PQ) -
+ sizeof(struct ark_udm_queue_stats_t)];
+ struct ark_udm_tlp_t tlp;
+ uint8_t reserved2[(ARK_UDM_PCIBP - ARK_UDM_TLP) -
+ sizeof(struct ark_udm_tlp_t)];
+ struct ark_udm_pcibp_t pcibp;
+ struct ark_udm_tlp_ps_t tlp_ps;
+ struct ark_udm_rt_cfg_t rt_cfg;
+ int8_t reserved3[(ARK_UDM_EXPECT_SIZE - ARK_UDM_RT_CFG) -
+ sizeof(struct ark_udm_rt_cfg_t)];
+};
+
+
+int ark_udm_verify(struct ark_udm_t *udm);
+int ark_udm_stop(struct ark_udm_t *udm, int wait);
+void ark_udm_start(struct ark_udm_t *udm);
+int ark_udm_reset(struct ark_udm_t *udm);
+void ark_udm_configure(struct ark_udm_t *udm,
+ uint32_t headroom,
+ uint32_t dataroom,
+ uint32_t write_interval_ns);
+void ark_udm_write_addr(struct ark_udm_t *udm, phys_addr_t addr);
+void ark_udm_stats_reset(struct ark_udm_t *udm);
+void ark_udm_dump_stats(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_queue_stats(struct ark_udm_t *udm, const char *msg,
+ uint16_t qid);
+void ark_udm_dump(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_perf(struct ark_udm_t *udm, const char *msg);
+void ark_udm_dump_setup(struct ark_udm_t *udm, uint16_t q_id);
+int ark_udm_is_flushed(struct ark_udm_t *udm);
+
+/* Per queue data */
+uint64_t ark_udm_dropped(struct ark_udm_t *udm);
+uint64_t ark_udm_bytes(struct ark_udm_t *udm);
+uint64_t ark_udm_packets(struct ark_udm_t *udm);
+
+void ark_udm_queue_stats_reset(struct ark_udm_t *udm);
+void ark_udm_queue_enable(struct ark_udm_t *udm, int enable);
+
+#endif
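ark_udm_verify() checks this overlay layout at run time against ARK_UDM_EXPECT_SIZE; a compile-time check can catch a packing mismatch earlier. The fragment below is only an illustration and is not part of the patch (inside a function body, DPDK's RTE_BUILD_BUG_ON would be the in-tree idiom).

#include "ark_udm.h"

/* Compile-time counterpart of the sizeof check in ark_udm_verify(). */
_Static_assert(sizeof(struct ark_udm_t) == ARK_UDM_EXPECT_SIZE,
	       "ark_udm_t overlay must match the FPGA register footprint");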
diff --git a/drivers/net/ark/rte_pmd_ark_version.map b/drivers/net/ark/rte_pmd_ark_version.map
new file mode 100644
index 00000000..1062e042
--- /dev/null
+++ b/drivers/net/ark/rte_pmd_ark_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+ local: *;
+
+};
diff --git a/drivers/net/avp/Makefile b/drivers/net/avp/Makefile
new file mode 100644
index 00000000..cd465aac
--- /dev/null
+++ b/drivers/net/avp/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2013-2017, Wind River Systems, Inc. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Wind River Systems nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avp.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+EXPORT_MAP := rte_pmd_avp_version.map
+
+LIBABIVER := 1
+
+# install public header files to enable compilation of the hypervisor level
+# dpdk application
+SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_common.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_AVP_PMD)-include += rte_avp_fifo.h
+
+#
+# all source files are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp_ethdev.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/avp/avp_ethdev.c b/drivers/net/avp/avp_ethdev.c
new file mode 100644
index 00000000..fe6849f5
--- /dev/null
+++ b/drivers/net/avp/avp_ethdev.c
@@ -0,0 +1,2312 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2013-2017, Wind River Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2) Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3) Neither the name of Wind River Systems nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_dev.h>
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+
+#include "rte_avp_common.h"
+#include "rte_avp_fifo.h"
+
+#include "avp_logs.h"
+
+
+static int avp_dev_create(struct rte_pci_device *pci_dev,
+ struct rte_eth_dev *eth_dev);
+
+static int avp_dev_configure(struct rte_eth_dev *dev);
+static int avp_dev_start(struct rte_eth_dev *dev);
+static void avp_dev_stop(struct rte_eth_dev *dev);
+static void avp_dev_close(struct rte_eth_dev *dev);
+static void avp_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+static void avp_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int avp_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete);
+static void avp_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void avp_dev_promiscuous_disable(struct rte_eth_dev *dev);
+
+static int avp_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *pool);
+
+static int avp_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+
+static uint16_t avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static uint16_t avp_xmit_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+static void avp_dev_rx_queue_release(void *rxq);
+static void avp_dev_tx_queue_release(void *txq);
+
+static void avp_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void avp_dev_stats_reset(struct rte_eth_dev *dev);
+
+
+#define AVP_DEV_TO_PCI(eth_dev) RTE_DEV_TO_PCI((eth_dev)->device)
+
+
+#define AVP_MAX_RX_BURST 64
+#define AVP_MAX_TX_BURST 64
+#define AVP_MAX_MAC_ADDRS 1
+#define AVP_MIN_RX_BUFSIZE ETHER_MIN_LEN
+
+
+/*
+ * Defines the number of microseconds to wait before checking the response
+ * queue for completion.
+ */
+#define AVP_REQUEST_DELAY_USECS (5000)
+
+/*
+ * Defines the number of times to check the response queue for completion before
+ * declaring a timeout.
+ */
+#define AVP_MAX_REQUEST_RETRY (100)
+
+/* Defines the current PCI driver version number */
+#define AVP_DPDK_DRIVER_VERSION RTE_AVP_CURRENT_GUEST_VERSION
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_avp_map[] = {
+ { .vendor_id = RTE_AVP_PCI_VENDOR_ID,
+ .device_id = RTE_AVP_PCI_DEVICE_ID,
+ .subsystem_vendor_id = RTE_AVP_PCI_SUB_VENDOR_ID,
+ .subsystem_device_id = RTE_AVP_PCI_SUB_DEVICE_ID,
+ .class_id = RTE_CLASS_ANY_ID,
+ },
+
+ { .vendor_id = 0, /* sentinel */
+ },
+};
+
+/*
+ * dev_ops for avp, bare necessities for basic operation
+ */
+static const struct eth_dev_ops avp_eth_dev_ops = {
+ .dev_configure = avp_dev_configure,
+ .dev_start = avp_dev_start,
+ .dev_stop = avp_dev_stop,
+ .dev_close = avp_dev_close,
+ .dev_infos_get = avp_dev_info_get,
+ .vlan_offload_set = avp_vlan_offload_set,
+ .stats_get = avp_dev_stats_get,
+ .stats_reset = avp_dev_stats_reset,
+ .link_update = avp_dev_link_update,
+ .promiscuous_enable = avp_dev_promiscuous_enable,
+ .promiscuous_disable = avp_dev_promiscuous_disable,
+ .rx_queue_setup = avp_dev_rx_queue_setup,
+ .rx_queue_release = avp_dev_rx_queue_release,
+ .tx_queue_setup = avp_dev_tx_queue_setup,
+ .tx_queue_release = avp_dev_tx_queue_release,
+};
+
+/**@{ AVP device flags */
+#define AVP_F_PROMISC (1 << 1)
+#define AVP_F_CONFIGURED (1 << 2)
+#define AVP_F_LINKUP (1 << 3)
+#define AVP_F_DETACHED (1 << 4)
+/**@} */
+
+/* Ethernet device validation marker */
+#define AVP_ETHDEV_MAGIC 0x92972862
+
+/*
+ * Defines the AVP device attributes which are attached to an RTE ethernet
+ * device
+ */
+struct avp_dev {
+ uint32_t magic; /**< Memory validation marker */
+ uint64_t device_id; /**< Unique system identifier */
+ struct ether_addr ethaddr; /**< Host specified MAC address */
+ struct rte_eth_dev_data *dev_data;
+ /**< Back pointer to ethernet device data */
+ volatile uint32_t flags; /**< Device operational flags */
+ uint8_t port_id; /**< Ethernet port identifier */
+ struct rte_mempool *pool; /**< pkt mbuf mempool */
+ unsigned int guest_mbuf_size; /**< local pool mbuf size */
+ unsigned int host_mbuf_size; /**< host mbuf size */
+ unsigned int max_rx_pkt_len; /**< maximum receive unit */
+ uint32_t host_features; /**< Supported feature bitmap */
+ uint32_t features; /**< Enabled feature bitmap */
+ unsigned int num_tx_queues; /**< Negotiated number of transmit queues */
+ unsigned int max_tx_queues; /**< Maximum number of transmit queues */
+ unsigned int num_rx_queues; /**< Negotiated number of receive queues */
+ unsigned int max_rx_queues; /**< Maximum number of receive queues */
+
+ struct rte_avp_fifo *tx_q[RTE_AVP_MAX_QUEUES]; /**< TX queue */
+ struct rte_avp_fifo *rx_q[RTE_AVP_MAX_QUEUES]; /**< RX queue */
+ struct rte_avp_fifo *alloc_q[RTE_AVP_MAX_QUEUES];
+ /**< Allocated mbufs queue */
+ struct rte_avp_fifo *free_q[RTE_AVP_MAX_QUEUES];
+ /**< To be freed mbufs queue */
+
+ /* mutual exclusion over the 'flag' and 'resp_q/req_q' fields */
+ rte_spinlock_t lock;
+
+ /* For request & response */
+ struct rte_avp_fifo *req_q; /**< Request queue */
+ struct rte_avp_fifo *resp_q; /**< Response queue */
+ void *host_sync_addr; /**< (host) Req/Resp Mem address */
+ void *sync_addr; /**< Req/Resp Mem address */
+ void *host_mbuf_addr; /**< (host) MBUF pool start address */
+ void *mbuf_addr; /**< MBUF pool start address */
+} __rte_cache_aligned;
+
+/* RTE ethernet private data */
+struct avp_adapter {
+ struct avp_dev avp;
+} __rte_cache_aligned;
+
+
+/* 32-bit MMIO register write */
+#define AVP_WRITE32(_value, _addr) rte_write32_relaxed((_value), (_addr))
+
+/* 32-bit MMIO register read */
+#define AVP_READ32(_addr) rte_read32_relaxed((_addr))
+
+/* Macro to cast the ethernet device private data to an AVP object */
+#define AVP_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct avp_adapter *)adapter)->avp)
+
+/*
+ * Defines the structure of an AVP device queue for the purpose of handling the
+ * receive and transmit burst callback functions
+ */
+struct avp_queue {
+ struct rte_eth_dev_data *dev_data;
+ /**< Backpointer to ethernet device data */
+ struct avp_dev *avp; /**< Backpointer to AVP device */
+ uint16_t queue_id;
+ /**< Queue identifier used for indexing current queue */
+ uint16_t queue_base;
+ /**< Base queue identifier for queue servicing */
+ uint16_t queue_limit;
+ /**< Maximum queue identifier for queue servicing */
+
+ uint64_t packets;
+ uint64_t bytes;
+ uint64_t errors;
+};
+
+/* send a request and wait for a response
+ *
+ * @warning must be called while holding the avp->lock spinlock.
+ */
+static int
+avp_dev_process_request(struct avp_dev *avp, struct rte_avp_request *request)
+{
+ unsigned int retry = AVP_MAX_REQUEST_RETRY;
+ void *resp_addr = NULL;
+ unsigned int count;
+ int ret;
+
+ PMD_DRV_LOG(DEBUG, "Sending request %u to host\n", request->req_id);
+
+ request->result = -ENOTSUP;
+
+ /* Discard any stale responses before starting a new request */
+ while (avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1))
+ PMD_DRV_LOG(DEBUG, "Discarding stale response\n");
+
+ rte_memcpy(avp->sync_addr, request, sizeof(*request));
+ count = avp_fifo_put(avp->req_q, &avp->host_sync_addr, 1);
+ if (count < 1) {
+ PMD_DRV_LOG(ERR, "Cannot send request %u to host\n",
+ request->req_id);
+ ret = -EBUSY;
+ goto done;
+ }
+
+ while (retry--) {
+ /* wait for a response */
+ usleep(AVP_REQUEST_DELAY_USECS);
+
+ count = avp_fifo_count(avp->resp_q);
+ if (count >= 1) {
+ /* response received */
+ break;
+ }
+
+ if ((count < 1) && (retry == 0)) {
+ PMD_DRV_LOG(ERR, "Timeout while waiting for a response for %u\n",
+ request->req_id);
+ ret = -ETIME;
+ goto done;
+ }
+ }
+
+ /* retrieve the response */
+ count = avp_fifo_get(avp->resp_q, (void **)&resp_addr, 1);
+ if ((count != 1) || (resp_addr != avp->host_sync_addr)) {
+ PMD_DRV_LOG(ERR, "Invalid response from host, count=%u resp=%p host_sync_addr=%p\n",
+ count, resp_addr, avp->host_sync_addr);
+ ret = -ENODATA;
+ goto done;
+ }
+
+ /* copy to user buffer */
+ rte_memcpy(request, avp->sync_addr, sizeof(*request));
+ ret = 0;
+
+ PMD_DRV_LOG(DEBUG, "Result %d received for request %u\n",
+ request->result, request->req_id);
+
+done:
+ return ret;
+}
+
+static int
+avp_dev_ctrl_set_link_state(struct rte_eth_dev *eth_dev, unsigned int state)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a link state change request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_CFG_NETWORK_IF;
+ request.if_up = state;
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+static int
+avp_dev_ctrl_set_config(struct rte_eth_dev *eth_dev,
+ struct rte_avp_device_config *config)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a configure request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_CFG_DEVICE;
+ memcpy(&request.config, config, sizeof(request.config));
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+static int
+avp_dev_ctrl_shutdown(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_request request;
+ int ret;
+
+ /* setup a shutdown request */
+ memset(&request, 0, sizeof(request));
+ request.req_id = RTE_AVP_REQ_SHUTDOWN_DEVICE;
+
+ ret = avp_dev_process_request(avp, &request);
+
+ return ret == 0 ? request.result : ret;
+}
+
+/* translate from host mbuf virtual address to guest virtual address */
+static inline void *
+avp_dev_translate_buffer(struct avp_dev *avp, void *host_mbuf_address)
+{
+ return RTE_PTR_ADD(RTE_PTR_SUB(host_mbuf_address,
+ (uintptr_t)avp->host_mbuf_addr),
+ (uintptr_t)avp->mbuf_addr);
+}
+
+/* translate from host physical address to guest virtual address */
+static void *
+avp_dev_translate_address(struct rte_eth_dev *eth_dev,
+ phys_addr_t host_phys_addr)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_mem_resource *resource;
+ struct rte_avp_memmap_info *info;
+ struct rte_avp_memmap *map;
+ off_t offset;
+ void *addr;
+ unsigned int i;
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_MEMORY_BAR].addr;
+ resource = &pci_dev->mem_resource[RTE_AVP_PCI_MEMMAP_BAR];
+ info = (struct rte_avp_memmap_info *)resource->addr;
+
+ offset = 0;
+ for (i = 0; i < info->nb_maps; i++) {
+ /* search all segments looking for a matching address */
+ map = &info->maps[i];
+
+ if ((host_phys_addr >= map->phys_addr) &&
+ (host_phys_addr < (map->phys_addr + map->length))) {
+ /* address is within this segment */
+ offset += (host_phys_addr - map->phys_addr);
+ addr = RTE_PTR_ADD(addr, offset);
+
+ PMD_DRV_LOG(DEBUG, "Translating host physical 0x%" PRIx64 " to guest virtual 0x%p\n",
+ host_phys_addr, addr);
+
+ return addr;
+ }
+ offset += map->length;
+ }
+
+ return NULL;
+}
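The translation walks the host's memmap table, accumulating the full length of every non-matching segment plus the intra-segment offset of the matching one, then adds that offset to the guest mapping of the AVP memory BAR. The toy model below mirrors only the offset arithmetic; struct toy_map is not a driver type and the segment values in the comment are made up.

#include <stdint.h>

struct toy_map { uint64_t phys_addr; uint64_t length; };

/* Return the offset into the memory BAR for a host physical address,
 * or UINT64_MAX when no segment matches (the driver returns NULL).
 * Example: with segments {0x10000000, 1 MB} and {0x80000000, 1 MB},
 * host address 0x80000040 maps to offset 1 MB + 0x40.
 */
static uint64_t toy_translate(const struct toy_map *maps, unsigned int n,
			      uint64_t host_phys)
{
	uint64_t offset = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (host_phys >= maps[i].phys_addr &&
		    host_phys < maps[i].phys_addr + maps[i].length)
			return offset + (host_phys - maps[i].phys_addr);
		offset += maps[i].length;
	}
	return UINT64_MAX;
}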
+
+/* verify that the incoming device version is compatible with our version */
+static int
+avp_dev_version_check(uint32_t version)
+{
+ uint32_t driver = RTE_AVP_STRIP_MINOR_VERSION(AVP_DPDK_DRIVER_VERSION);
+ uint32_t device = RTE_AVP_STRIP_MINOR_VERSION(version);
+
+ if (device <= driver) {
+ /* the host driver version is less than or equal to ours */
+ return 0;
+ }
+
+ return 1;
+}
+
+/* verify that memory regions have expected version and validation markers */
+static int
+avp_dev_check_regions(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct rte_avp_memmap_info *memmap;
+ struct rte_avp_device_info *info;
+ struct rte_mem_resource *resource;
+ unsigned int i;
+
+ /* Dump resource info for debug */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ resource = &pci_dev->mem_resource[i];
+ if ((resource->phys_addr == 0) || (resource->len == 0))
+ continue;
+
+ PMD_DRV_LOG(DEBUG, "resource[%u]: phys=0x%" PRIx64 " len=%" PRIu64 " addr=%p\n",
+ i, resource->phys_addr,
+ resource->len, resource->addr);
+
+ switch (i) {
+ case RTE_AVP_PCI_MEMMAP_BAR:
+ memmap = (struct rte_avp_memmap_info *)resource->addr;
+ if ((memmap->magic != RTE_AVP_MEMMAP_MAGIC) ||
+ (memmap->version != RTE_AVP_MEMMAP_VERSION)) {
+ PMD_DRV_LOG(ERR, "Invalid memmap magic 0x%08x and version %u\n",
+ memmap->magic, memmap->version);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_DEVICE_BAR:
+ info = (struct rte_avp_device_info *)resource->addr;
+ if ((info->magic != RTE_AVP_DEVICE_MAGIC) ||
+ avp_dev_version_check(info->version)) {
+ PMD_DRV_LOG(ERR, "Invalid device info magic 0x%08x or version 0x%08x > 0x%08x\n",
+ info->magic, info->version,
+ AVP_DPDK_DRIVER_VERSION);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_MEMORY_BAR:
+ case RTE_AVP_PCI_MMIO_BAR:
+ if (resource->addr == NULL) {
+ PMD_DRV_LOG(ERR, "Missing address space for BAR%u\n",
+ i);
+ return -EINVAL;
+ }
+ break;
+
+ case RTE_AVP_PCI_MSIX_BAR:
+ default:
+ /* no validation required */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_detach(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ PMD_DRV_LOG(NOTICE, "Detaching port %u from AVP device 0x%" PRIx64 "\n",
+ eth_dev->data->port_id, avp->device_id);
+
+ rte_spinlock_lock(&avp->lock);
+
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(NOTICE, "port %u already detached\n",
+ eth_dev->data->port_id);
+ ret = 0;
+ goto unlock;
+ }
+
+ /* shutdown the device first so the host stops sending us packets. */
+ ret = avp_dev_ctrl_shutdown(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to send/recv shutdown to host, ret=%d\n",
+ ret);
+ avp->flags &= ~AVP_F_DETACHED;
+ goto unlock;
+ }
+
+ avp->flags |= AVP_F_DETACHED;
+ rte_wmb();
+
+ /* wait for queues to acknowledge the presence of the detach flag */
+ rte_delay_ms(1);
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+_avp_set_rx_queue_mappings(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
+{
+ struct avp_dev *avp =
+ AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct avp_queue *rxq;
+ uint16_t queue_count;
+ uint16_t remainder;
+
+ rxq = (struct avp_queue *)eth_dev->data->rx_queues[rx_queue_id];
+
+ /*
+ * Must map all AVP fifos as evenly as possible between the configured
+ * device queues. Each device queue will service a subset of the AVP
+ * fifos. If the fifos do not divide evenly among the device queues, the
+ * first set of device queues services one extra AVP fifo each.
+ */
+ queue_count = avp->num_rx_queues / eth_dev->data->nb_rx_queues;
+ remainder = avp->num_rx_queues % eth_dev->data->nb_rx_queues;
+ if (rx_queue_id < remainder) {
+ /* these queues must service one extra FIFO */
+ rxq->queue_base = rx_queue_id * (queue_count + 1);
+ rxq->queue_limit = rxq->queue_base + (queue_count + 1) - 1;
+ } else {
+ /* these queues service the regular number of FIFOs */
+ rxq->queue_base = ((remainder * (queue_count + 1)) +
+ ((rx_queue_id - remainder) * queue_count));
+ rxq->queue_limit = rxq->queue_base + queue_count - 1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "rxq %u at %p base %u limit %u\n",
+ rx_queue_id, rxq, rxq->queue_base, rxq->queue_limit);
+
+ rxq->queue_id = rxq->queue_base;
+}
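As a worked example of the mapping above: if the negotiated avp->num_rx_queues is 5 and the application configures nb_rx_queues = 2, then queue_count = 2 and remainder = 1, so device queue 0 services AVP fifos 0 through 2 (base 0, limit 2) and device queue 1 services fifos 3 and 4 (base 3, limit 4). The fifo count itself comes from the negotiation in _avp_set_queue_counts() below.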
+
+static void
+_avp_set_queue_counts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ void *addr;
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ host_info = (struct rte_avp_device_info *)addr;
+
+ /*
+ * the transmit direction is not negotiated beyond respecting the max
+ * number of queues because the host can handle arbitrary guest tx
+ * queues (host rx queues).
+ */
+ avp->num_tx_queues = eth_dev->data->nb_tx_queues;
+
+ /*
+ * the receive direction is more restrictive. The host requires a
+ * minimum number of guest rx queues (host tx queues) therefore
+ * negotiate a value that is at least as large as the host minimum
+ * requirement. If the host and guest values are not identical then a
+ * mapping will be established in the receive_queue_setup function.
+ */
+ avp->num_rx_queues = RTE_MAX(host_info->min_rx_queues,
+ eth_dev->data->nb_rx_queues);
+
+ PMD_DRV_LOG(DEBUG, "Requesting %u Tx and %u Rx queues from host\n",
+ avp->num_tx_queues, avp->num_rx_queues);
+}
+
+static int
+avp_dev_attach(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_config config;
+ unsigned int i;
+ int ret;
+
+ PMD_DRV_LOG(NOTICE, "Attaching port %u to AVP device 0x%" PRIx64 "\n",
+ eth_dev->data->port_id, avp->device_id);
+
+ rte_spinlock_lock(&avp->lock);
+
+ if (!(avp->flags & AVP_F_DETACHED)) {
+ PMD_DRV_LOG(NOTICE, "port %u already attached\n",
+ eth_dev->data->port_id);
+ ret = 0;
+ goto unlock;
+ }
+
+ /*
+ * make sure that the detached flag is set prior to reconfiguring the
+ * queues.
+ */
+ avp->flags |= AVP_F_DETACHED;
+ rte_wmb();
+
+ /*
+ * re-run the device create utility which will parse the new host info
+ * and setup the AVP device queue pointers.
+ */
+ ret = avp_dev_create(AVP_DEV_TO_PCI(eth_dev), eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-create AVP device, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ if (avp->flags & AVP_F_CONFIGURED) {
+ /*
+ * Update the receive queue mapping to handle cases where the
+ * source and destination hosts have different queue
+ * requirements. As long as the DETACHED flag is asserted the
+ * queue table should not be referenced so it should be safe to
+ * update it.
+ */
+ _avp_set_queue_counts(eth_dev);
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
+ _avp_set_rx_queue_mappings(eth_dev, i);
+
+ /*
+ * Update the host with our config details so that it knows the
+ * device is active.
+ */
+ memset(&config, 0, sizeof(config));
+ config.device_id = avp->device_id;
+ config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+ config.driver_version = AVP_DPDK_DRIVER_VERSION;
+ config.features = avp->features;
+ config.num_tx_queues = avp->num_tx_queues;
+ config.num_rx_queues = avp->num_rx_queues;
+ config.if_up = !!(avp->flags & AVP_F_LINKUP);
+
+ ret = avp_dev_ctrl_set_config(eth_dev, &config);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+ }
+
+ rte_wmb();
+ avp->flags &= ~AVP_F_DETACHED;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+avp_dev_interrupt_handler(void *data)
+{
+ struct rte_eth_dev *eth_dev = data;
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ uint32_t status, value;
+ int ret;
+
+ if (registers == NULL)
+ rte_panic("no mapped MMIO register space\n");
+
+ /* read the interrupt status register
+ * note: this register clears on read so all raised interrupts must be
+ * handled or remembered for later processing
+ */
+ status = AVP_READ32(
+ RTE_PTR_ADD(registers,
+ RTE_AVP_INTERRUPT_STATUS_OFFSET));
+
+ if (status & RTE_AVP_MIGRATION_INTERRUPT_MASK) {
+ /* handle interrupt based on current status */
+ value = AVP_READ32(
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_STATUS_OFFSET));
+ switch (value) {
+ case RTE_AVP_MIGRATION_DETACHED:
+ ret = avp_dev_detach(eth_dev);
+ break;
+ case RTE_AVP_MIGRATION_ATTACHED:
+ ret = avp_dev_attach(eth_dev);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unexpected migration status, status=%u\n",
+ value);
+ ret = -EINVAL;
+ }
+
+ /* acknowledge the request by writing out our current status */
+ value = (ret == 0 ? value : RTE_AVP_MIGRATION_ERROR);
+ AVP_WRITE32(value,
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_ACK_OFFSET));
+
+ PMD_DRV_LOG(NOTICE, "AVP migration interrupt handled\n");
+ }
+
+ if (status & ~RTE_AVP_MIGRATION_INTERRUPT_MASK)
+ PMD_DRV_LOG(WARNING, "AVP unexpected interrupt, status=0x%08x\n",
+ status);
+
+ /* re-enable UIO interrupt handling */
+ ret = rte_intr_enable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to re-enable UIO interrupts, ret=%d\n",
+ ret);
+ /* continue */
+ }
+}
+
+static int
+avp_dev_enable_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ int ret;
+
+ if (registers == NULL)
+ return -EINVAL;
+
+ /* enable UIO interrupt handling */
+ ret = rte_intr_enable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable UIO interrupts, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* inform the device that all interrupts are enabled */
+ AVP_WRITE32(RTE_AVP_APP_INTERRUPTS_MASK,
+ RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+ return 0;
+}
+
+static int
+avp_dev_disable_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ int ret;
+
+ if (registers == NULL)
+ return 0;
+
+ /* inform the device that all interrupts are disabled */
+ AVP_WRITE32(RTE_AVP_NO_INTERRUPTS_MASK,
+ RTE_PTR_ADD(registers, RTE_AVP_INTERRUPT_MASK_OFFSET));
+
+ /* disable UIO interrupt handling */
+ ret = rte_intr_disable(&pci_dev->intr_handle);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable UIO interrupts, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_setup_interrupts(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ int ret;
+
+ /* register a callback handler with UIO for interrupt notifications */
+ ret = rte_intr_callback_register(&pci_dev->intr_handle,
+ avp_dev_interrupt_handler,
+ (void *)eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to register UIO interrupt callback, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* enable interrupt processing */
+ return avp_dev_enable_interrupts(eth_dev);
+}
+
+static int
+avp_dev_migration_pending(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ void *registers = pci_dev->mem_resource[RTE_AVP_PCI_MMIO_BAR].addr;
+ uint32_t value;
+
+ if (registers == NULL)
+ return 0;
+
+ value = AVP_READ32(RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_STATUS_OFFSET));
+ if (value == RTE_AVP_MIGRATION_DETACHED) {
+ /* migration is in progress; ack it if we have not already */
+ AVP_WRITE32(value,
+ RTE_PTR_ADD(registers,
+ RTE_AVP_MIGRATION_ACK_OFFSET));
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * create an AVP device using the supplied device info by first translating it
+ * to guest address space(s).
+ */
+static int
+avp_dev_create(struct rte_pci_device *pci_dev,
+ struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ struct rte_mem_resource *resource;
+ unsigned int i;
+
+ resource = &pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR];
+ if (resource->addr == NULL) {
+ PMD_DRV_LOG(ERR, "BAR%u is not mapped\n",
+ RTE_AVP_PCI_DEVICE_BAR);
+ return -EFAULT;
+ }
+ host_info = (struct rte_avp_device_info *)resource->addr;
+
+ if ((host_info->magic != RTE_AVP_DEVICE_MAGIC) ||
+ avp_dev_version_check(host_info->version)) {
+ PMD_DRV_LOG(ERR, "Invalid AVP PCI device, magic 0x%08x version 0x%08x > 0x%08x\n",
+ host_info->magic, host_info->version,
+ AVP_DPDK_DRIVER_VERSION);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP host device is v%u.%u.%u\n",
+ RTE_AVP_GET_RELEASE_VERSION(host_info->version),
+ RTE_AVP_GET_MAJOR_VERSION(host_info->version),
+ RTE_AVP_GET_MINOR_VERSION(host_info->version));
+
+ PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u TX queue(s)\n",
+ host_info->min_tx_queues, host_info->max_tx_queues);
+ PMD_DRV_LOG(DEBUG, "AVP host supports %u to %u RX queue(s)\n",
+ host_info->min_rx_queues, host_info->max_rx_queues);
+ PMD_DRV_LOG(DEBUG, "AVP host supports features 0x%08x\n",
+ host_info->features);
+
+ if (avp->magic != AVP_ETHDEV_MAGIC) {
+ /*
+ * First time initialization (i.e., not during a VM
+ * migration)
+ */
+ memset(avp, 0, sizeof(*avp));
+ avp->magic = AVP_ETHDEV_MAGIC;
+ avp->dev_data = eth_dev->data;
+ avp->port_id = eth_dev->data->port_id;
+ avp->host_mbuf_size = host_info->mbuf_size;
+ avp->host_features = host_info->features;
+ rte_spinlock_init(&avp->lock);
+ memcpy(&avp->ethaddr.addr_bytes[0],
+ host_info->ethaddr, ETHER_ADDR_LEN);
+ /* adjust max values to not exceed our max */
+ avp->max_tx_queues =
+ RTE_MIN(host_info->max_tx_queues, RTE_AVP_MAX_QUEUES);
+ avp->max_rx_queues =
+ RTE_MIN(host_info->max_rx_queues, RTE_AVP_MAX_QUEUES);
+ } else {
+ /* Re-attaching during migration */
+
+ /* TODO... requires validation of host values */
+ if ((host_info->features & avp->features) != avp->features) {
+ PMD_DRV_LOG(ERR, "AVP host features mismatched; 0x%08x, host=0x%08x\n",
+ avp->features, host_info->features);
+ /* this should not be possible; continue for now */
+ }
+ }
+
+ /* the device id is allowed to change over migrations */
+ avp->device_id = host_info->device_id;
+
+ /* translate incoming host addresses to guest address space */
+ PMD_DRV_LOG(DEBUG, "AVP first host tx queue at 0x%" PRIx64 "\n",
+ host_info->tx_phys);
+ PMD_DRV_LOG(DEBUG, "AVP first host alloc queue at 0x%" PRIx64 "\n",
+ host_info->alloc_phys);
+ for (i = 0; i < avp->max_tx_queues; i++) {
+ avp->tx_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->tx_phys + (i * host_info->tx_size));
+
+ avp->alloc_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->alloc_phys + (i * host_info->alloc_size));
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP first host rx queue at 0x%" PRIx64 "\n",
+ host_info->rx_phys);
+ PMD_DRV_LOG(DEBUG, "AVP first host free queue at 0x%" PRIx64 "\n",
+ host_info->free_phys);
+ for (i = 0; i < avp->max_rx_queues; i++) {
+ avp->rx_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->rx_phys + (i * host_info->rx_size));
+ avp->free_q[i] = avp_dev_translate_address(eth_dev,
+ host_info->free_phys + (i * host_info->free_size));
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP host request queue at 0x%" PRIx64 "\n",
+ host_info->req_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host response queue at 0x%" PRIx64 "\n",
+ host_info->resp_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host sync address at 0x%" PRIx64 "\n",
+ host_info->sync_phys);
+ PMD_DRV_LOG(DEBUG, "AVP host mbuf address at 0x%" PRIx64 "\n",
+ host_info->mbuf_phys);
+ avp->req_q = avp_dev_translate_address(eth_dev, host_info->req_phys);
+ avp->resp_q = avp_dev_translate_address(eth_dev, host_info->resp_phys);
+ avp->sync_addr =
+ avp_dev_translate_address(eth_dev, host_info->sync_phys);
+ avp->mbuf_addr =
+ avp_dev_translate_address(eth_dev, host_info->mbuf_phys);
+
+ /*
+ * store the host mbuf virtual address so that we can calculate
+ * relative offsets for each mbuf as they are processed
+ */
+ avp->host_mbuf_addr = host_info->mbuf_va;
+ avp->host_sync_addr = host_info->sync_va;
+
+ /*
+ * store the maximum packet length that is supported by the host.
+ */
+ avp->max_rx_pkt_len = host_info->max_rx_pkt_len;
+ PMD_DRV_LOG(DEBUG, "AVP host max receive packet length is %u\n",
+ host_info->max_rx_pkt_len);
+
+ return 0;
+}
+
+/*
+ * This function is based on probe() function in avp_pci.c
+ * It returns 0 on success.
+ */
+static int
+eth_avp_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp =
+ AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev;
+ int ret;
+
+ pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ eth_dev->dev_ops = &avp_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &avp_recv_pkts;
+ eth_dev->tx_pkt_burst = &avp_xmit_pkts;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /*
+ * no setup required on secondary processes. All data is saved
+		 * in dev_private by the primary process. All resources should
+ * be mapped to the same virtual address so all pointers should
+ * be valid.
+ */
+ if (eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ return 0;
+ }
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ /* Check current migration status */
+ if (avp_dev_migration_pending(eth_dev)) {
+ PMD_DRV_LOG(ERR, "VM live migration operation in progress\n");
+ return -EBUSY;
+ }
+
+ /* Check BAR resources */
+ ret = avp_dev_check_regions(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to validate BAR resources, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ /* Enable interrupts */
+ ret = avp_dev_setup_interrupts(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to enable interrupts, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Handle each subtype */
+ ret = avp_dev_create(pci_dev, eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to create device, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("avp_ethdev", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate %d bytes needed to store MAC addresses\n",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ /* Get a mac from device config */
+ ether_addr_copy(&avp->ethaddr, &eth_dev->data->mac_addrs[0]);
+
+ return 0;
+}
+
+static int
+eth_avp_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ int ret;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (eth_dev->data == NULL)
+ return 0;
+
+ ret = avp_dev_disable_interrupts(eth_dev);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable interrupts, ret=%d\n", ret);
+ return ret;
+ }
+
+ if (eth_dev->data->mac_addrs != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+
+ return 0;
+}
+
+static int
+eth_avp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev,
+ sizeof(struct avp_adapter));
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = eth_avp_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+eth_avp_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ eth_avp_dev_uninit);
+}
+
+static struct rte_pci_driver rte_avp_pmd = {
+ .id_table = pci_id_avp_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_avp_pci_probe,
+ .remove = eth_avp_pci_remove,
+};
+
+static int
+avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
+ struct avp_dev *avp)
+{
+ unsigned int max_rx_pkt_len;
+
+ max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
+ if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the guest MTU is greater than either the host or guest
+ * buffers then chained mbufs have to be enabled in the TX
+ * direction. It is assumed that the application will not need
+ * to send packets larger than their max_rx_pkt_len (MRU).
+ */
+ return 1;
+ }
+
+ if ((avp->max_rx_pkt_len > avp->guest_mbuf_size) ||
+ (avp->max_rx_pkt_len > avp->host_mbuf_size)) {
+ /*
+ * If the host MRU is greater than its own mbuf size or the
+ * guest mbuf size then chained mbufs have to be enabled in the
+ * RX direction.
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
+static int
+avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *pool)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ struct avp_queue *rxq;
+
+ if (rx_queue_id >= eth_dev->data->nb_rx_queues) {
+ PMD_DRV_LOG(ERR, "RX queue id is out of range: rx_queue_id=%u, nb_rx_queues=%u\n",
+ rx_queue_id, eth_dev->data->nb_rx_queues);
+ return -EINVAL;
+ }
+
+ /* Save mbuf pool pointer */
+ avp->pool = pool;
+
+ /* Save the local mbuf size */
+ mbp_priv = rte_mempool_get_priv(pool);
+ avp->guest_mbuf_size = (uint16_t)(mbp_priv->mbuf_data_room_size);
+ avp->guest_mbuf_size -= RTE_PKTMBUF_HEADROOM;
+
+ if (avp_dev_enable_scattered(eth_dev, avp)) {
+ if (!eth_dev->data->scattered_rx) {
+ PMD_DRV_LOG(NOTICE, "AVP device configured for chained mbufs\n");
+ eth_dev->data->scattered_rx = 1;
+ eth_dev->rx_pkt_burst = avp_recv_scattered_pkts;
+ eth_dev->tx_pkt_burst = avp_xmit_scattered_pkts;
+ }
+ }
+
+ PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
+ avp->max_rx_pkt_len,
+ eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ avp->host_mbuf_size,
+ avp->guest_mbuf_size);
+
+ /* allocate a queue object */
+ rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct avp_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate new Rx queue object\n");
+ return -ENOMEM;
+ }
+
+ /* save back pointers to AVP and Ethernet devices */
+ rxq->avp = avp;
+ rxq->dev_data = eth_dev->data;
+ eth_dev->data->rx_queues[rx_queue_id] = (void *)rxq;
+
+ /* setup the queue receive mapping for the current queue. */
+ _avp_set_rx_queue_mappings(eth_dev, rx_queue_id);
+
+ PMD_DRV_LOG(DEBUG, "Rx queue %u setup at %p\n", rx_queue_id, rxq);
+
+ (void)nb_rx_desc;
+ (void)rx_conf;
+ return 0;
+}
+
+static int
+avp_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct avp_queue *txq;
+
+ if (tx_queue_id >= eth_dev->data->nb_tx_queues) {
+ PMD_DRV_LOG(ERR, "TX queue id is out of range: tx_queue_id=%u, nb_tx_queues=%u\n",
+ tx_queue_id, eth_dev->data->nb_tx_queues);
+ return -EINVAL;
+ }
+
+ /* allocate a queue object */
+ txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct avp_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate new Tx queue object\n");
+ return -ENOMEM;
+ }
+
+ /* only the configured set of transmit queues are used */
+ txq->queue_id = tx_queue_id;
+ txq->queue_base = tx_queue_id;
+ txq->queue_limit = tx_queue_id;
+
+ /* save back pointers to AVP and Ethernet devices */
+ txq->avp = avp;
+ txq->dev_data = eth_dev->data;
+ eth_dev->data->tx_queues[tx_queue_id] = (void *)txq;
+
+ PMD_DRV_LOG(DEBUG, "Tx queue %u setup at %p\n", tx_queue_id, txq);
+
+ (void)nb_tx_desc;
+ (void)tx_conf;
+ return 0;
+}
+
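+/*
+ * Compare two MAC addresses by treating each 6-byte address as three
+ * 16-bit words; the result is zero only when the addresses are equal.
+ */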
+static inline int
+_avp_cmp_ether_addr(struct ether_addr *a, struct ether_addr *b)
+{
+ uint16_t *_a = (uint16_t *)&a->addr_bytes[0];
+ uint16_t *_b = (uint16_t *)&b->addr_bytes[0];
+ return (_a[0] ^ _b[0]) | (_a[1] ^ _b[1]) | (_a[2] ^ _b[2]);
+}
+
+static inline int
+_avp_mac_filter(struct avp_dev *avp, struct rte_mbuf *m)
+{
+ struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ if (likely(_avp_cmp_ether_addr(&avp->ethaddr, &eth->d_addr) == 0)) {
+ /* allow all packets destined to our address */
+ return 0;
+ }
+
+ if (likely(is_broadcast_ether_addr(&eth->d_addr))) {
+ /* allow all broadcast packets */
+ return 0;
+ }
+
+ if (likely(is_multicast_ether_addr(&eth->d_addr))) {
+ /* allow all multicast packets */
+ return 0;
+ }
+
+ if (avp->flags & AVP_F_PROMISC) {
+ /* allow all packets when in promiscuous mode */
+ return 0;
+ }
+
+ return -1;
+}
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+static inline void
+__avp_dev_buffer_sanity_check(struct avp_dev *avp, struct rte_avp_desc *buf)
+{
+ struct rte_avp_desc *first_buf;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int pkt_len;
+ unsigned int nb_segs;
+ void *pkt_data;
+ unsigned int i;
+
+ first_buf = avp_dev_translate_buffer(avp, buf);
+
+ i = 0;
+ pkt_len = 0;
+ nb_segs = first_buf->nb_segs;
+ do {
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ if (pkt_buf == NULL)
+ rte_panic("bad buffer: segment %u has an invalid address %p\n",
+ i, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ if (pkt_data == NULL)
+ rte_panic("bad buffer: segment %u has a NULL data pointer\n",
+ i);
+ if (pkt_buf->data_len == 0)
+ rte_panic("bad buffer: segment %u has 0 data length\n",
+ i);
+ pkt_len += pkt_buf->data_len;
+ nb_segs--;
+ i++;
+
+ } while (nb_segs && (buf = pkt_buf->next) != NULL);
+
+ if (nb_segs != 0)
+ rte_panic("bad buffer: expected %u segments found %u\n",
+ first_buf->nb_segs, (first_buf->nb_segs - nb_segs));
+ if (pkt_len != first_buf->pkt_len)
+ rte_panic("bad buffer: expected length %u found %u\n",
+ first_buf->pkt_len, pkt_len);
+}
+
+#define avp_dev_buffer_sanity_check(a, b) \
+ __avp_dev_buffer_sanity_check((a), (b))
+
+#else /* RTE_LIBRTE_AVP_DEBUG_BUFFERS */
+
+#define avp_dev_buffer_sanity_check(a, b) do {} while (0)
+
+#endif
+
+/*
+ * Copy a host buffer chain to a set of mbufs. This function assumes that
+ * there are exactly the required number of mbufs to copy all source bytes.
+ */
+static inline struct rte_mbuf *
+avp_dev_copy_from_buffers(struct avp_dev *avp,
+ struct rte_avp_desc *buf,
+ struct rte_mbuf **mbufs,
+ unsigned int count)
+{
+ struct rte_mbuf *m_previous = NULL;
+ struct rte_avp_desc *pkt_buf;
+ unsigned int total_length = 0;
+ unsigned int copy_length;
+ unsigned int src_offset;
+ struct rte_mbuf *m;
+ uint16_t ol_flags;
+ uint16_t vlan_tci;
+ void *pkt_data;
+ unsigned int i;
+
+ avp_dev_buffer_sanity_check(avp, buf);
+
+ /* setup the first source buffer */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ total_length = pkt_buf->pkt_len;
+ src_offset = 0;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ ol_flags = PKT_RX_VLAN_PKT;
+ vlan_tci = pkt_buf->vlan_tci;
+ } else {
+ ol_flags = 0;
+ vlan_tci = 0;
+ }
+
+ for (i = 0; (i < count) && (buf != NULL); i++) {
+ /* fill each destination buffer */
+ m = mbufs[i];
+
+ if (m_previous != NULL)
+ m_previous->next = m;
+
+ m_previous = m;
+
+ do {
+ /*
+ * Copy as many source buffers as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->guest_mbuf_size -
+ rte_pktmbuf_data_len(m)),
+ (pkt_buf->data_len -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ rte_pktmbuf_data_len(m)),
+ RTE_PTR_ADD(pkt_data, src_offset),
+ copy_length);
+ rte_pktmbuf_data_len(m) += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == pkt_buf->data_len)) {
+ /* need a new source buffer */
+ buf = pkt_buf->next;
+ if (buf != NULL) {
+ pkt_buf = avp_dev_translate_buffer(
+ avp, buf);
+ pkt_data = avp_dev_translate_buffer(
+ avp, pkt_buf->data);
+ src_offset = 0;
+ }
+ }
+
+ if (unlikely(rte_pktmbuf_data_len(m) ==
+ avp->guest_mbuf_size)) {
+ /* need a new destination mbuf */
+ break;
+ }
+
+ } while (buf != NULL);
+ }
+
+ m = mbufs[0];
+ m->ol_flags = ol_flags;
+ m->nb_segs = count;
+ rte_pktmbuf_pkt_len(m) = total_length;
+ m->vlan_tci = vlan_tci;
+
+ __rte_mbuf_sanity_check(m, 1);
+
+ return m;
+}
+
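+/*
+ * Scattered receive: a packet may span several host buffers and is copied
+ * into a chain of guest mbufs sized according to the configured mempool.
+ */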
+static uint16_t
+avp_recv_scattered_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct rte_mbuf *mbufs[RTE_AVP_MAX_MBUF_SEGMENTS];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ struct rte_avp_desc *buf;
+ unsigned int count, avail, n;
+ unsigned int guest_mbuf_size;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int buf_len;
+ unsigned int port_id;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ return 0;
+ }
+
+ guest_mbuf_size = avp->guest_mbuf_size;
+ port_id = avp->port_id;
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i + 1 < n) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+ buf = avp_bufs[i];
+
+ /* Peek into the first buffer to determine the total length */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ buf_len = pkt_buf->pkt_len;
+
+ /* Allocate enough mbufs to receive the entire packet */
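+		/* ceiling division: round up to whole guest mbufs */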
+ required = (buf_len + guest_mbuf_size - 1) / guest_mbuf_size;
+ if (rte_pktmbuf_alloc_bulk(avp->pool, mbufs, required)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* Copy the data from the buffers to our mbufs */
+ m = avp_dev_copy_from_buffers(avp, buf, mbufs, required);
+
+ /* finalize mbuf */
+ m->port = port_id;
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += buf_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+
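+/*
+ * Simple receive: each packet must fit within a single guest mbuf; larger
+ * or multi-segment host buffers are counted as receive errors (the
+ * scattered variant above handles those).
+ */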
+static uint16_t
+avp_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_RX_BURST];
+ struct avp_dev *avp = rxq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *free_q;
+ struct rte_avp_fifo *rx_q;
+ unsigned int count, avail, n;
+ unsigned int pkt_len;
+ struct rte_mbuf *m;
+ char *pkt_data;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ return 0;
+ }
+
+ rx_q = avp->rx_q[rxq->queue_id];
+ free_q = avp->free_q[rxq->queue_id];
+
+ /* setup next queue to service */
+ rxq->queue_id = (rxq->queue_id < rxq->queue_limit) ?
+ (rxq->queue_id + 1) : rxq->queue_base;
+
+ /* determine how many slots are available in the free queue */
+ count = avp_fifo_free_count(free_q);
+
+ /* determine how many packets are available in the rx queue */
+ avail = avp_fifo_count(rx_q);
+
+ /* determine how many packets can be received */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+ count = RTE_MIN(count, (unsigned int)AVP_MAX_RX_BURST);
+
+ if (unlikely(count == 0)) {
+ /* no free buffers, or no buffers on the rx queue */
+ return 0;
+ }
+
+ /* retrieve pending packets */
+ n = avp_fifo_get(rx_q, (void **)&avp_bufs, count);
+ PMD_RX_LOG(DEBUG, "Receiving %u packets from Rx queue at %p\n",
+ count, rx_q);
+
+ count = 0;
+ for (i = 0; i < n; i++) {
+ /* prefetch next entry while processing current one */
+ if (i < n - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust host pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = pkt_buf->pkt_len;
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_buf->nb_segs > 1))) {
+ /*
+ * application should be using the scattered receive
+ * function
+ */
+ rxq->errors++;
+ continue;
+ }
+
+		/* process each received packet */
+ m = rte_pktmbuf_alloc(avp->pool);
+ if (unlikely(m == NULL)) {
+ rxq->dev_data->rx_mbuf_alloc_failed++;
+ continue;
+ }
+
+ /* copy data out of the host buffer to our buffer */
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_memcpy(rte_pktmbuf_mtod(m, void *), pkt_data, pkt_len);
+
+ /* initialize the local mbuf */
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+ m->port = avp->port_id;
+
+ if (pkt_buf->ol_flags & RTE_AVP_RX_VLAN_PKT) {
+ m->ol_flags = PKT_RX_VLAN_PKT;
+ m->vlan_tci = pkt_buf->vlan_tci;
+ }
+
+ if (_avp_mac_filter(avp, m) != 0) {
+ /* silently discard packets not destined to our MAC */
+ rte_pktmbuf_free(m);
+ continue;
+ }
+
+ /* return new mbuf to caller */
+ rx_pkts[count++] = m;
+ rxq->bytes += pkt_len;
+ }
+
+ rxq->packets += count;
+
+ /* return the buffers to the free queue */
+ avp_fifo_put(free_q, (void **)&avp_bufs[0], n);
+
+ return count;
+}
+
+/*
+ * Copy a chained mbuf to a set of host buffers. This function assumes that
+ * there are sufficient destination buffers to contain the entire source
+ * packet.
+ */
+static inline uint16_t
+avp_dev_copy_to_buffers(struct avp_dev *avp,
+ struct rte_mbuf *mbuf,
+ struct rte_avp_desc **buffers,
+ unsigned int count)
+{
+ struct rte_avp_desc *previous_buf = NULL;
+ struct rte_avp_desc *first_buf = NULL;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_desc *buf;
+ size_t total_length;
+ struct rte_mbuf *m;
+ size_t copy_length;
+ size_t src_offset;
+ char *pkt_data;
+ unsigned int i;
+
+ __rte_mbuf_sanity_check(mbuf, 1);
+
+ m = mbuf;
+ src_offset = 0;
+ total_length = rte_pktmbuf_pkt_len(m);
+ for (i = 0; (i < count) && (m != NULL); i++) {
+ /* fill each destination buffer */
+ buf = buffers[i];
+
+ if (i < count - 1) {
+ /* prefetch next entry while processing this one */
+ pkt_buf = avp_dev_translate_buffer(avp, buffers[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, buf);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+
+ /* setup the buffer chain */
+ if (previous_buf != NULL)
+ previous_buf->next = buf;
+ else
+ first_buf = pkt_buf;
+
+ previous_buf = pkt_buf;
+
+ do {
+ /*
+ * copy as many source mbuf segments as will fit in the
+ * destination buffer.
+ */
+ copy_length = RTE_MIN((avp->host_mbuf_size -
+ pkt_buf->data_len),
+ (rte_pktmbuf_data_len(m) -
+ src_offset));
+ rte_memcpy(RTE_PTR_ADD(pkt_data, pkt_buf->data_len),
+ RTE_PTR_ADD(rte_pktmbuf_mtod(m, void *),
+ src_offset),
+ copy_length);
+ pkt_buf->data_len += copy_length;
+ src_offset += copy_length;
+
+ if (likely(src_offset == rte_pktmbuf_data_len(m))) {
+ /* need a new source buffer */
+ m = m->next;
+ src_offset = 0;
+ }
+
+ if (unlikely(pkt_buf->data_len ==
+ avp->host_mbuf_size)) {
+ /* need a new destination buffer */
+ break;
+ }
+
+ } while (m != NULL);
+ }
+
+ first_buf->nb_segs = count;
+ first_buf->pkt_len = total_length;
+
+ if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ first_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ first_buf->vlan_tci = mbuf->vlan_tci;
+ }
+
+ avp_dev_buffer_sanity_check(avp, buffers[0]);
+
+ return total_length;
+}
+
+
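+/*
+ * Scattered transmit: each mbuf chain is copied into one or more host
+ * buffers taken from the alloc queue before being pushed onto the tx queue.
+ */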
+static uint16_t
+avp_xmit_scattered_pkts(void *tx_queue,
+ struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rte_avp_desc *avp_bufs[(AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)];
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *tx_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ unsigned int orig_nb_pkts;
+ struct rte_mbuf *m;
+ unsigned int required;
+ unsigned int segments;
+ unsigned int tx_bytes;
+ unsigned int i;
+
+ orig_nb_pkts = nb_pkts;
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ /* TODO ... buffer for X packets then drop? */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+ if (unlikely(avail > (AVP_MAX_TX_BURST *
+ RTE_AVP_MAX_MBUF_SEGMENTS)))
+ avail = AVP_MAX_TX_BURST * RTE_AVP_MAX_MBUF_SEGMENTS;
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ nb_pkts = RTE_MIN(count, nb_pkts);
+
+ /* determine how many packets will fit in the available buffers */
+ count = 0;
+ segments = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ if (likely(i < (unsigned int)nb_pkts - 1)) {
+ /* prefetch next entry while processing this one */
+ rte_prefetch0(tx_pkts[i + 1]);
+ }
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ if (unlikely((required == 0) ||
+ (required > RTE_AVP_MAX_MBUF_SEGMENTS)))
+ break;
+ else if (unlikely(required + segments > avail))
+ break;
+ segments += required;
+ count++;
+ }
+ nb_pkts = count;
+
+ if (unlikely(nb_pkts == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ nb_pkts, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, segments);
+ if (unlikely(n != segments)) {
+ PMD_TX_LOG(DEBUG, "Failed to allocate buffers "
+ "n=%u, segments=%u, orig=%u\n",
+ n, segments, orig_nb_pkts);
+ txq->errors += orig_nb_pkts;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ count = 0;
+ for (i = 0; i < nb_pkts; i++) {
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* determine how many buffers are required for this packet */
+ required = (rte_pktmbuf_pkt_len(m) + avp->host_mbuf_size - 1) /
+ avp->host_mbuf_size;
+
+ tx_bytes += avp_dev_copy_to_buffers(avp, m,
+ &avp_bufs[count], required);
+ tx_bufs[i] = avp_bufs[count];
+ count += required;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += nb_pkts;
+ txq->bytes += tx_bytes;
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_BUFFERS
+ for (i = 0; i < nb_pkts; i++)
+ avp_dev_buffer_sanity_check(avp, tx_bufs[i]);
+#endif
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&tx_bufs[0], nb_pkts);
+ if (unlikely(n != orig_nb_pkts))
+ txq->errors += (orig_nb_pkts - n);
+
+ return n;
+}
+
+
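+/*
+ * Simple transmit: each packet is copied into exactly one host buffer;
+ * oversized packets are truncated and counted as transmit errors.
+ */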
+static uint16_t
+avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct rte_avp_desc *avp_bufs[AVP_MAX_TX_BURST];
+ struct avp_dev *avp = txq->avp;
+ struct rte_avp_desc *pkt_buf;
+ struct rte_avp_fifo *alloc_q;
+ struct rte_avp_fifo *tx_q;
+ unsigned int count, avail, n;
+ struct rte_mbuf *m;
+ unsigned int pkt_len;
+ unsigned int tx_bytes;
+ char *pkt_data;
+ unsigned int i;
+
+ if (unlikely(avp->flags & AVP_F_DETACHED)) {
+ /* VM live migration in progress */
+ /* TODO ... buffer for X packets then drop?! */
+ txq->errors++;
+ return 0;
+ }
+
+ tx_q = avp->tx_q[txq->queue_id];
+ alloc_q = avp->alloc_q[txq->queue_id];
+
+ /* limit the number of transmitted packets to the max burst size */
+ if (unlikely(nb_pkts > AVP_MAX_TX_BURST))
+ nb_pkts = AVP_MAX_TX_BURST;
+
+ /* determine how many buffers are available to copy into */
+ avail = avp_fifo_count(alloc_q);
+
+ /* determine how many slots are available in the transmit queue */
+ count = avp_fifo_free_count(tx_q);
+
+ /* determine how many packets can be sent */
+ count = RTE_MIN(count, avail);
+ count = RTE_MIN(count, nb_pkts);
+
+ if (unlikely(count == 0)) {
+ /* no available buffers, or no space on the tx queue */
+ txq->errors += nb_pkts;
+ return 0;
+ }
+
+ PMD_TX_LOG(DEBUG, "Sending %u packets on Tx queue at %p\n",
+ count, tx_q);
+
+ /* retrieve sufficient send buffers */
+ n = avp_fifo_get(alloc_q, (void **)&avp_bufs, count);
+ if (unlikely(n != count)) {
+ txq->errors++;
+ return 0;
+ }
+
+ tx_bytes = 0;
+ for (i = 0; i < count; i++) {
+ /* prefetch next entry while processing the current one */
+ if (i < count - 1) {
+ pkt_buf = avp_dev_translate_buffer(avp,
+ avp_bufs[i + 1]);
+ rte_prefetch0(pkt_buf);
+ }
+
+ /* process each packet to be transmitted */
+ m = tx_pkts[i];
+
+ /* Adjust pointers for guest addressing */
+ pkt_buf = avp_dev_translate_buffer(avp, avp_bufs[i]);
+ pkt_data = avp_dev_translate_buffer(avp, pkt_buf->data);
+ pkt_len = rte_pktmbuf_pkt_len(m);
+
+ if (unlikely((pkt_len > avp->guest_mbuf_size) ||
+ (pkt_len > avp->host_mbuf_size))) {
+ /*
+ * application should be using the scattered transmit
+ * function; send it truncated to avoid the performance
+ * hit of having to manage returning the already
+ * allocated buffer to the free list. This should not
+ * happen since the application should have set the
+ * max_rx_pkt_len based on its MTU and it should be
+ * policing its own packet sizes.
+ */
+ txq->errors++;
+ pkt_len = RTE_MIN(avp->guest_mbuf_size,
+ avp->host_mbuf_size);
+ }
+
+ /* copy data out of our mbuf and into the AVP buffer */
+ rte_memcpy(pkt_data, rte_pktmbuf_mtod(m, void *), pkt_len);
+ pkt_buf->pkt_len = pkt_len;
+ pkt_buf->data_len = pkt_len;
+ pkt_buf->nb_segs = 1;
+ pkt_buf->next = NULL;
+
+ if (m->ol_flags & PKT_TX_VLAN_PKT) {
+ pkt_buf->ol_flags |= RTE_AVP_TX_VLAN_PKT;
+ pkt_buf->vlan_tci = m->vlan_tci;
+ }
+
+ tx_bytes += pkt_len;
+
+ /* free the original mbuf */
+ rte_pktmbuf_free(m);
+ }
+
+ txq->packets += count;
+ txq->bytes += tx_bytes;
+
+ /* send the packets */
+ n = avp_fifo_put(tx_q, (void **)&avp_bufs[0], count);
+
+ return n;
+}
+
+static void
+avp_dev_rx_queue_release(void *rx_queue)
+{
+ struct avp_queue *rxq = (struct avp_queue *)rx_queue;
+ struct avp_dev *avp = rxq->avp;
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ if (data->rx_queues[i] == rxq)
+ data->rx_queues[i] = NULL;
+ }
+}
+
+static void
+avp_dev_tx_queue_release(void *tx_queue)
+{
+ struct avp_queue *txq = (struct avp_queue *)tx_queue;
+ struct avp_dev *avp = txq->avp;
+ struct rte_eth_dev_data *data = avp->dev_data;
+ unsigned int i;
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ if (data->tx_queues[i] == txq)
+ data->tx_queues[i] = NULL;
+ }
+}
+
+static int
+avp_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev = AVP_DEV_TO_PCI(eth_dev);
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_avp_device_info *host_info;
+ struct rte_avp_device_config config;
+ int mask = 0;
+ void *addr;
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+
+ addr = pci_dev->mem_resource[RTE_AVP_PCI_DEVICE_BAR].addr;
+ host_info = (struct rte_avp_device_info *)addr;
+
+ /* Setup required number of queues */
+ _avp_set_queue_counts(eth_dev);
+
+ mask = (ETH_VLAN_STRIP_MASK |
+ ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK);
+ avp_vlan_offload_set(eth_dev, mask);
+
+ /* update device config */
+ memset(&config, 0, sizeof(config));
+ config.device_id = host_info->device_id;
+ config.driver_type = RTE_AVP_DRIVER_TYPE_DPDK;
+ config.driver_version = AVP_DPDK_DRIVER_VERSION;
+ config.features = avp->features;
+ config.num_tx_queues = avp->num_tx_queues;
+ config.num_rx_queues = avp->num_rx_queues;
+
+ ret = avp_dev_ctrl_set_config(eth_dev, &config);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Config request failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ avp->flags |= AVP_F_CONFIGURED;
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static int
+avp_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ ret = -ENOTSUP;
+ goto unlock;
+ }
+
+ /* disable features that we do not support */
+ eth_dev->data->dev_conf.rxmode.hw_ip_checksum = 0;
+ eth_dev->data->dev_conf.rxmode.hw_vlan_filter = 0;
+ eth_dev->data->dev_conf.rxmode.hw_vlan_extend = 0;
+ eth_dev->data->dev_conf.rxmode.hw_strip_crc = 0;
+
+ /* update link state */
+ ret = avp_dev_ctrl_set_link_state(eth_dev, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ ret);
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags |= AVP_F_LINKUP;
+
+ ret = 0;
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+ return ret;
+}
+
+static void
+avp_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags &= ~AVP_F_LINKUP;
+
+ /* update link state */
+ ret = avp_dev_ctrl_set_link_state(eth_dev, 0);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Link state change failed by host, ret=%d\n",
+ ret);
+ }
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ int ret;
+
+ rte_spinlock_lock(&avp->lock);
+ if (avp->flags & AVP_F_DETACHED) {
+ PMD_DRV_LOG(ERR, "Operation not supported during VM live migration\n");
+ goto unlock;
+ }
+
+ /* remember current link state */
+ avp->flags &= ~AVP_F_LINKUP;
+ avp->flags &= ~AVP_F_CONFIGURED;
+
+ ret = avp_dev_disable_interrupts(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to disable interrupts\n");
+ /* continue */
+ }
+
+ /* update device state */
+ ret = avp_dev_ctrl_shutdown(eth_dev);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Device shutdown failed by host, ret=%d\n",
+ ret);
+ /* continue */
+ }
+
+unlock:
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static int
+avp_dev_link_update(struct rte_eth_dev *eth_dev,
+ __rte_unused int wait_to_complete)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_eth_link *link = &eth_dev->data->dev_link;
+
+ link->link_speed = ETH_SPEED_NUM_10G;
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_status = !!(avp->flags & AVP_F_LINKUP);
+
+ return -1;
+}
+
+static void
+avp_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ rte_spinlock_lock(&avp->lock);
+ if ((avp->flags & AVP_F_PROMISC) == 0) {
+ avp->flags |= AVP_F_PROMISC;
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode enabled on %u\n",
+ eth_dev->data->port_id);
+ }
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ rte_spinlock_lock(&avp->lock);
+ if ((avp->flags & AVP_F_PROMISC) != 0) {
+ avp->flags &= ~AVP_F_PROMISC;
+ PMD_DRV_LOG(DEBUG, "Promiscuous mode disabled on %u\n",
+ eth_dev->data->port_id);
+ }
+ rte_spinlock_unlock(&avp->lock);
+}
+
+static void
+avp_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ dev_info->driver_name = "rte_avp_pmd";
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->max_rx_queues = avp->max_rx_queues;
+ dev_info->max_tx_queues = avp->max_tx_queues;
+ dev_info->min_rx_bufsize = AVP_MIN_RX_BUFSIZE;
+ dev_info->max_rx_pktlen = avp->max_rx_pkt_len;
+ dev_info->max_mac_addrs = AVP_MAX_MAC_ADDRS;
+ if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
+ }
+}
+
+static void
+avp_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (avp->host_features & RTE_AVP_FEATURE_VLAN_OFFLOAD) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
+ avp->features |= RTE_AVP_FEATURE_VLAN_OFFLOAD;
+ else
+ avp->features &= ~RTE_AVP_FEATURE_VLAN_OFFLOAD;
+ } else {
+ PMD_DRV_LOG(ERR, "VLAN strip offload not supported\n");
+ }
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
+ PMD_DRV_LOG(ERR, "VLAN filter offload not supported\n");
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK) {
+ if (eth_dev->data->dev_conf.rxmode.hw_vlan_extend)
+ PMD_DRV_LOG(ERR, "VLAN extend offload not supported\n");
+ }
+}
+
+static void
+avp_dev_stats_get(struct rte_eth_dev *eth_dev, struct rte_eth_stats *stats)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ stats->ipackets += rxq->packets;
+ stats->ibytes += rxq->bytes;
+ stats->ierrors += rxq->errors;
+
+ stats->q_ipackets[i] += rxq->packets;
+ stats->q_ibytes[i] += rxq->bytes;
+ stats->q_errors[i] += rxq->errors;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ stats->opackets += txq->packets;
+ stats->obytes += txq->bytes;
+ stats->oerrors += txq->errors;
+
+ stats->q_opackets[i] += txq->packets;
+ stats->q_obytes[i] += txq->bytes;
+ stats->q_errors[i] += txq->errors;
+ }
+ }
+}
+
+static void
+avp_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct avp_dev *avp = AVP_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ unsigned int i;
+
+ for (i = 0; i < avp->num_rx_queues; i++) {
+ struct avp_queue *rxq = avp->dev_data->rx_queues[i];
+
+ if (rxq) {
+ rxq->bytes = 0;
+ rxq->packets = 0;
+ rxq->errors = 0;
+ }
+ }
+
+ for (i = 0; i < avp->num_tx_queues; i++) {
+ struct avp_queue *txq = avp->dev_data->tx_queues[i];
+
+ if (txq) {
+ txq->bytes = 0;
+ txq->packets = 0;
+ txq->errors = 0;
+ }
+ }
+}
+
+RTE_PMD_REGISTER_PCI(net_avp, rte_avp_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avp, pci_id_avp_map);
diff --git a/drivers/net/avp/avp_logs.h b/drivers/net/avp/avp_logs.h
new file mode 100644
index 00000000..252cab7d
--- /dev/null
+++ b/drivers/net/avp/avp_logs.h
@@ -0,0 +1,59 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (c) 2013-2015, Wind River Systems, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2) Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3) Neither the name of Wind River Systems nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AVP_LOGS_H_
+#define _AVP_LOGS_H_
+
+#include <rte_log.h>
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() rx: " fmt, __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s() tx: " fmt, __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_AVP_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#endif /* _AVP_LOGS_H_ */
diff --git a/drivers/net/avp/rte_avp_common.h b/drivers/net/avp/rte_avp_common.h
new file mode 100644
index 00000000..488d7216
--- /dev/null
+++ b/drivers/net/avp/rte_avp_common.h
@@ -0,0 +1,432 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contact Information:
+ * Wind River Systems, Inc.
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014-2017 Wind River Systems, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_AVP_COMMON_H_
+#define _RTE_AVP_COMMON_H_
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#define RTE_STD_C11
+#else
+#include <stdint.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_ether.h>
+#include <rte_atomic.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * The AVP name forms part of the network device name.
+ */
+#define RTE_AVP_NAMESIZE 32
+
+/**
+ * AVP alias is a user-defined value used for lookups from secondary
+ * processes. Typically, this is a UUID.
+ */
+#define RTE_AVP_ALIASSIZE 128
+
+/*
+ * Request id.
+ */
+enum rte_avp_req_id {
+ RTE_AVP_REQ_UNKNOWN = 0,
+ RTE_AVP_REQ_CHANGE_MTU,
+ RTE_AVP_REQ_CFG_NETWORK_IF,
+ RTE_AVP_REQ_CFG_DEVICE,
+ RTE_AVP_REQ_SHUTDOWN_DEVICE,
+ RTE_AVP_REQ_MAX,
+};
+
+/**@{ AVP device driver types */
+#define RTE_AVP_DRIVER_TYPE_UNKNOWN 0
+#define RTE_AVP_DRIVER_TYPE_DPDK 1
+#define RTE_AVP_DRIVER_TYPE_KERNEL 2
+#define RTE_AVP_DRIVER_TYPE_QEMU 3
+/**@} */
+
+/**@{ AVP device operational modes */
+#define RTE_AVP_MODE_HOST 0 /**< AVP interface created in host */
+#define RTE_AVP_MODE_GUEST 1 /**< AVP interface created for export to guest */
+#define RTE_AVP_MODE_TRACE 2 /**< AVP interface created for packet tracing */
+/**@} */
+
+/*
+ * Structure for AVP queue configuration query request/result
+ */
+struct rte_avp_device_config {
+ uint64_t device_id; /**< Unique system identifier */
+ uint32_t driver_type; /**< Device Driver type */
+ uint32_t driver_version; /**< Device Driver version */
+ uint32_t features; /**< Negotiated features */
+ uint16_t num_tx_queues; /**< Number of active transmit queues */
+ uint16_t num_rx_queues; /**< Number of active receive queues */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+} __attribute__ ((__packed__));
+
+/*
+ * Structure for AVP request.
+ */
+struct rte_avp_request {
+ uint32_t req_id; /**< Request id */
+ RTE_STD_C11
+ union {
+ uint32_t new_mtu; /**< New MTU */
+ uint8_t if_up; /**< 1: interface up, 0: interface down */
+ struct rte_avp_device_config config; /**< Queue configuration */
+ };
+ int32_t result; /**< Result for processing request */
+} __attribute__ ((__packed__));
+
+/*
+ * FIFO struct mapped in shared memory. It describes a circular buffer FIFO.
+ * Write and read positions wrap around; the FIFO is empty when write == read.
+ * Writing must never overwrite the read position.
+ */
+struct rte_avp_fifo {
+ volatile unsigned int write; /**< Next position to be written*/
+ volatile unsigned int read; /**< Next position to be read */
+ unsigned int len; /**< Circular buffer length */
+ unsigned int elem_size; /**< Pointer size - for 32/64 bit OS */
+ void *volatile buffer[]; /**< The buffer contains mbuf pointers */
+};
+
+
+/*
+ * AVP packet buffer header used to define the exchange of packet data.
+ */
+struct rte_avp_desc {
+ uint64_t pad0;
+ void *pkt_mbuf; /**< Reference to packet mbuf */
+ uint8_t pad1[14];
+ uint16_t ol_flags; /**< Offload features. */
+ void *next; /**< Reference to next buffer in chain */
+ void *data; /**< Start address of data in segment buffer. */
+ uint16_t data_len; /**< Amount of data in segment buffer. */
+ uint8_t nb_segs; /**< Number of segments */
+ uint8_t pad2;
+ uint16_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
+ uint32_t pad3;
+ uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */
+ uint32_t pad4;
+} __attribute__ ((__aligned__(RTE_CACHE_LINE_SIZE), __packed__));
+
+
+/**@{ AVP device features */
+#define RTE_AVP_FEATURE_VLAN_OFFLOAD (1 << 0) /**< Emulated HW VLAN offload */
+/**@} */
+
+
+/**@{ Offload feature flags */
+#define RTE_AVP_TX_VLAN_PKT 0x0001 /**< TX packet is a 802.1q VLAN packet. */
+#define RTE_AVP_RX_VLAN_PKT 0x0800 /**< RX packet is a 802.1q VLAN packet. */
+/**@} */
+
+
+/**@{ AVP PCI identifiers */
+#define RTE_AVP_PCI_VENDOR_ID 0x1af4
+#define RTE_AVP_PCI_DEVICE_ID 0x1110
+/**@} */
+
+/**@{ AVP PCI subsystem identifiers */
+#define RTE_AVP_PCI_SUB_VENDOR_ID RTE_AVP_PCI_VENDOR_ID
+#define RTE_AVP_PCI_SUB_DEVICE_ID 0x1104
+/**@} */
+
+/**@{ AVP PCI BAR definitions */
+#define RTE_AVP_PCI_MMIO_BAR 0
+#define RTE_AVP_PCI_MSIX_BAR 1
+#define RTE_AVP_PCI_MEMORY_BAR 2
+#define RTE_AVP_PCI_MEMMAP_BAR 4
+#define RTE_AVP_PCI_DEVICE_BAR 5
+#define RTE_AVP_PCI_MAX_BAR 6
+/**@} */
+
+/**@{ AVP PCI BAR name definitions */
+#define RTE_AVP_MMIO_BAR_NAME "avp-mmio"
+#define RTE_AVP_MSIX_BAR_NAME "avp-msix"
+#define RTE_AVP_MEMORY_BAR_NAME "avp-memory"
+#define RTE_AVP_MEMMAP_BAR_NAME "avp-memmap"
+#define RTE_AVP_DEVICE_BAR_NAME "avp-device"
+/**@} */
+
+/**@{ AVP PCI MSI-X vectors */
+#define RTE_AVP_MIGRATION_MSIX_VECTOR 0 /**< Migration interrupts */
+#define RTE_AVP_MAX_MSIX_VECTORS 1
+/**@} */
+
+/**@{ AVP Migration status/ack register values */
+#define RTE_AVP_MIGRATION_NONE 0 /**< Migration never executed */
+#define RTE_AVP_MIGRATION_DETACHED 1 /**< Device detached during migration */
+#define RTE_AVP_MIGRATION_ATTACHED 2 /**< Device reattached during migration */
+#define RTE_AVP_MIGRATION_ERROR 3 /**< Device failed to attach/detach */
+/**@} */
+
+/**@{ AVP MMIO Register Offsets */
+#define RTE_AVP_REGISTER_BASE 0
+#define RTE_AVP_INTERRUPT_MASK_OFFSET (RTE_AVP_REGISTER_BASE + 0)
+#define RTE_AVP_INTERRUPT_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 4)
+#define RTE_AVP_MIGRATION_STATUS_OFFSET (RTE_AVP_REGISTER_BASE + 8)
+#define RTE_AVP_MIGRATION_ACK_OFFSET (RTE_AVP_REGISTER_BASE + 12)
+/**@} */
+
+/**@{ AVP Interrupt Status Mask */
+#define RTE_AVP_MIGRATION_INTERRUPT_MASK (1 << 1)
+#define RTE_AVP_APP_INTERRUPTS_MASK 0xFFFFFFFF
+#define RTE_AVP_NO_INTERRUPTS_MASK 0
+/**@} */
+
+/*
+ * Maximum number of memory regions to export
+ */
+#define RTE_AVP_MAX_MAPS 2048
+
+/*
+ * Description of a single memory region
+ */
+struct rte_avp_memmap {
+ void *addr;
+ phys_addr_t phys_addr;
+ uint64_t length;
+};
+
+/*
+ * AVP memory mapping validation marker
+ */
+#define RTE_AVP_MEMMAP_MAGIC 0x20131969
+
+/**@{ AVP memory map versions */
+#define RTE_AVP_MEMMAP_VERSION_1 1
+#define RTE_AVP_MEMMAP_VERSION RTE_AVP_MEMMAP_VERSION_1
+/**@} */
+
+/*
+ * Defines a list of memory regions exported from the host to the guest
+ */
+struct rte_avp_memmap_info {
+ uint32_t magic; /**< Memory validation marker */
+ uint32_t version; /**< Data format version */
+ uint32_t nb_maps;
+ struct rte_avp_memmap maps[RTE_AVP_MAX_MAPS];
+};
+
+/*
+ * AVP device memory validation marker
+ */
+#define RTE_AVP_DEVICE_MAGIC 0x20131975
+
+/**@{ AVP device map versions
+ * WARNING: do not change the format or names of these variables. They are
+ * automatically parsed from the build system to generate the SDK package
+ * name.
+ **/
+#define RTE_AVP_RELEASE_VERSION_1 1
+#define RTE_AVP_RELEASE_VERSION RTE_AVP_RELEASE_VERSION_1
+#define RTE_AVP_MAJOR_VERSION_0 0
+#define RTE_AVP_MAJOR_VERSION_1 1
+#define RTE_AVP_MAJOR_VERSION_2 2
+#define RTE_AVP_MAJOR_VERSION RTE_AVP_MAJOR_VERSION_2
+#define RTE_AVP_MINOR_VERSION_0 0
+#define RTE_AVP_MINOR_VERSION_1 1
+#define RTE_AVP_MINOR_VERSION_13 13
+#define RTE_AVP_MINOR_VERSION RTE_AVP_MINOR_VERSION_13
+/**@} */
+
+
+/**
+ * Generates a 32-bit version number from the specified version number
+ * components
+ */
+#define RTE_AVP_MAKE_VERSION(_release, _major, _minor) \
+((((_release) & 0xffff) << 16) | (((_major) & 0xff) << 8) | ((_minor) & 0xff))
+
+
+/**
+ * Represents the current version of the AVP host driver
+ * WARNING: in the current development branch the host and guest driver
+ * version should always be the same. When patching guest features back to
+ * GA releases the host version number should not be updated unless there was
+ * an actual change made to the host driver.
+ */
+#define RTE_AVP_CURRENT_HOST_VERSION \
+RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
+ RTE_AVP_MAJOR_VERSION_0, \
+ RTE_AVP_MINOR_VERSION_1)
+
+
+/**
+ * Represents the current version of the AVP guest drivers
+ */
+#define RTE_AVP_CURRENT_GUEST_VERSION \
+RTE_AVP_MAKE_VERSION(RTE_AVP_RELEASE_VERSION_1, \
+ RTE_AVP_MAJOR_VERSION_2, \
+ RTE_AVP_MINOR_VERSION_13)
+
+/**
+ * Access AVP device version values
+ */
+#define RTE_AVP_GET_RELEASE_VERSION(_version) (((_version) >> 16) & 0xffff)
+#define RTE_AVP_GET_MAJOR_VERSION(_version) (((_version) >> 8) & 0xff)
+#define RTE_AVP_GET_MINOR_VERSION(_version) ((_version) & 0xff)
+/**@}*/
+
+
+/**
+ * Remove the minor version number so that only the release and major versions
+ * are used for comparisons.
+ */
+#define RTE_AVP_STRIP_MINOR_VERSION(_version) ((_version) >> 8)
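+
+/*
+ * Worked example (values follow directly from the macros above):
+ * RTE_AVP_MAKE_VERSION(1, 2, 13) evaluates to 0x0001020d, for which
+ * RTE_AVP_GET_RELEASE_VERSION() returns 1, RTE_AVP_GET_MAJOR_VERSION()
+ * returns 2, RTE_AVP_GET_MINOR_VERSION() returns 13, and
+ * RTE_AVP_STRIP_MINOR_VERSION() returns 0x00000102.
+ */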
+
+
+/**
+ * Defines the number of mbuf pools supported per device (1 per socket)
+ */
+#define RTE_AVP_MAX_MEMPOOLS 8
+
+/*
+ * Defines address translation parameters for each supported mbuf pool
+ */
+struct rte_avp_mempool_info {
+ void *addr;
+ phys_addr_t phys_addr;
+ uint64_t length;
+};
+
+/*
+ * Struct used to create an AVP device. Passed to the kernel in an IOCTL call or
+ * via inter-VM shared memory when used in a guest.
+ */
+struct rte_avp_device_info {
+ uint32_t magic; /**< Memory validation marker */
+ uint32_t version; /**< Data format version */
+
+ char ifname[RTE_AVP_NAMESIZE]; /**< Network device name for AVP */
+
+ phys_addr_t tx_phys;
+ phys_addr_t rx_phys;
+ phys_addr_t alloc_phys;
+ phys_addr_t free_phys;
+
+ uint32_t features; /**< Supported feature bitmap */
+ uint8_t min_rx_queues; /**< Minimum supported receive/free queues */
+ uint8_t num_rx_queues; /**< Recommended number of receive/free queues */
+ uint8_t max_rx_queues; /**< Maximum supported receive/free queues */
+ uint8_t min_tx_queues; /**< Minimum supported transmit/alloc queues */
+ uint8_t num_tx_queues;
+ /**< Recommended number of transmit/alloc queues */
+ uint8_t max_tx_queues; /**< Maximum supported transmit/alloc queues */
+
+ uint32_t tx_size; /**< Size of each transmit queue */
+ uint32_t rx_size; /**< Size of each receive queue */
+ uint32_t alloc_size; /**< Size of each alloc queue */
+ uint32_t free_size; /**< Size of each free queue */
+
+ /* Used by Ethtool */
+ phys_addr_t req_phys;
+ phys_addr_t resp_phys;
+ phys_addr_t sync_phys;
+ void *sync_va;
+
+ /* mbuf mempool (used when a single memory area is supported) */
+ void *mbuf_va;
+ phys_addr_t mbuf_phys;
+
+ /* mbuf mempools */
+ struct rte_avp_mempool_info pool[RTE_AVP_MAX_MEMPOOLS];
+
+#ifdef __KERNEL__
+ /* Ethernet info */
+ char ethaddr[ETH_ALEN];
+#else
+ char ethaddr[ETHER_ADDR_LEN];
+#endif
+
+	uint8_t mode;	/**< device mode, i.e., guest, host, or trace */
+
+ /* mbuf size */
+ unsigned int mbuf_size;
+
+ /*
+ * unique id to differentiate between two instantiations of the same
+ * AVP device (i.e., the guest needs to know if the device has been
+ * deleted and recreated).
+ */
+ uint64_t device_id;
+
+ uint32_t max_rx_pkt_len; /**< Maximum receive unit size */
+};
+
+#define RTE_AVP_MAX_QUEUES 8 /**< Maximum number of queues per device */
+
+/** Maximum number of chained mbufs in a packet */
+#define RTE_AVP_MAX_MBUF_SEGMENTS 5
+
+#define RTE_AVP_DEVICE "avp"
+
+#define RTE_AVP_IOCTL_TEST _IOWR(0, 1, int)
+#define RTE_AVP_IOCTL_CREATE _IOWR(0, 2, struct rte_avp_device_info)
+#define RTE_AVP_IOCTL_RELEASE _IOWR(0, 3, struct rte_avp_device_info)
+#define RTE_AVP_IOCTL_QUERY _IOWR(0, 4, struct rte_avp_device_config)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_AVP_COMMON_H_ */
diff --git a/drivers/net/avp/rte_avp_fifo.h b/drivers/net/avp/rte_avp_fifo.h
new file mode 100644
index 00000000..803eb80a
--- /dev/null
+++ b/drivers/net/avp/rte_avp_fifo.h
@@ -0,0 +1,169 @@
+/*-
+ * This file is provided under a dual BSD/LGPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GNU LESSER GENERAL PUBLIC LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 Wind River Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * Contact Information:
+ * Wind River Systems, Inc.
+ *
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2017 Wind River Systems, Inc. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _RTE_AVP_FIFO_H_
+#define _RTE_AVP_FIFO_H_
+
+#include "rte_avp_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __KERNEL__
+/* Write memory barrier for kernel compiles */
+#define AVP_WMB() smp_wmb()
+/* Read memory barrier for kernel compiles */
+#define AVP_RMB() smp_rmb()
+#else
+/* Write memory barrier for userspace compiles */
+#define AVP_WMB() rte_wmb()
+/* Read memory barrier for userspace compiles */
+#define AVP_RMB() rte_rmb()
+#endif
+
+#ifndef __KERNEL__
+#include <rte_debug.h>
+
+/**
+ * Initializes the avp fifo structure
+ */
+static inline void
+avp_fifo_init(struct rte_avp_fifo *fifo, unsigned int size)
+{
+ /* Ensure size is power of 2 */
+ if (size & (size - 1))
+ rte_panic("AVP fifo size must be power of 2\n");
+
+ fifo->write = 0;
+ fifo->read = 0;
+ fifo->len = size;
+ fifo->elem_size = sizeof(void *);
+}
+#endif
+
+/**
+ * Adds num elements into the fifo. Return the number actually written
+ */
+static inline unsigned
+avp_fifo_put(struct rte_avp_fifo *fifo, void **data, unsigned int num)
+{
+ unsigned int i = 0;
+ unsigned int fifo_write = fifo->write;
+ unsigned int fifo_read = fifo->read;
+ unsigned int new_write = fifo_write;
+
+ for (i = 0; i < num; i++) {
+ new_write = (new_write + 1) & (fifo->len - 1);
+
+ if (new_write == fifo_read)
+ break;
+ fifo->buffer[fifo_write] = data[i];
+ fifo_write = new_write;
+ }
+ AVP_WMB();
+ fifo->write = fifo_write;
+ return i;
+}
+
+/**
+ * Get up to num elements from the fifo. Return the number actually read
+ */
+static inline unsigned int
+avp_fifo_get(struct rte_avp_fifo *fifo, void **data, unsigned int num)
+{
+ unsigned int i = 0;
+ unsigned int new_read = fifo->read;
+ unsigned int fifo_write = fifo->write;
+
+ if (new_read == fifo_write)
+ return 0; /* empty */
+
+ for (i = 0; i < num; i++) {
+ if (new_read == fifo_write)
+ break;
+
+ data[i] = fifo->buffer[new_read];
+ new_read = (new_read + 1) & (fifo->len - 1);
+ }
+ AVP_RMB();
+ fifo->read = new_read;
+ return i;
+}
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline unsigned int
+avp_fifo_count(struct rte_avp_fifo *fifo)
+{
+ return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the num of available elements in the fifo
+ */
+static inline unsigned int
+avp_fifo_free_count(struct rte_avp_fifo *fifo)
+{
+ return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_AVP_FIFO_H_ */
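The fifo helpers above are header-only, so a short userspace usage sketch follows; it assumes struct rte_avp_fifo is laid out as rte_avp_common.h implies (control fields followed by a buffer[] array of pointer slots), that EAL has been initialized, and that names such as fifo_demo are purely illustrative.

#include <stdio.h>
#include <rte_malloc.h>
#include "rte_avp_fifo.h"

#define FIFO_ENTRIES 8	/* must be a power of 2; one slot always stays unused */

static void
fifo_demo(void)
{
	struct rte_avp_fifo *fifo;
	int a = 1, b = 2, c = 3, d = 4;
	void *in[4] = { &a, &b, &c, &d };
	void *out[4];
	unsigned int n;

	/* Control block plus FIFO_ENTRIES pointer slots in one allocation. */
	fifo = rte_zmalloc("avp_fifo_demo",
			   sizeof(*fifo) + FIFO_ENTRIES * sizeof(void *), 0);
	if (fifo == NULL)
		return;

	avp_fifo_init(fifo, FIFO_ENTRIES);

	n = avp_fifo_put(fifo, in, 4);		/* enqueue up to 4 pointers */
	printf("queued %u, used %u, free %u\n",
	       n, avp_fifo_count(fifo), avp_fifo_free_count(fifo));

	n = avp_fifo_get(fifo, out, 4);		/* dequeue them again */
	printf("dequeued %u\n", n);

	rte_free(fifo);
}

Because avp_fifo_put() stops when the next write index would collide with the read index, a fifo of len N holds at most N - 1 entries; the power-of-2 requirement lets both helpers wrap indices with a cheap mask instead of a modulo.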
diff --git a/drivers/net/avp/rte_pmd_avp_version.map b/drivers/net/avp/rte_pmd_avp_version.map
new file mode 100644
index 00000000..af8f3f47
--- /dev/null
+++ b/drivers/net/avp/rte_pmd_avp_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index e971fb66..e1231069 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -29,8 +29,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += ecore_sp.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += elink.c
SRCS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x_vfpf.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += lib/librte_mempool lib/librte_mbuf
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c
index 0d16a737..1a7e1c8e 100644
--- a/drivers/net/bnx2x/bnx2x.c
+++ b/drivers/net/bnx2x/bnx2x.c
@@ -31,7 +31,7 @@
#define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
#define BNX2X_PMD_VERSION_MAJOR 1
#define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_REVISION 1
+#define BNX2X_PMD_VERSION_REVISION 5
#define BNX2X_PMD_VERSION_PATCH 1
static inline const char *
@@ -2220,7 +2220,7 @@ int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
}
PMD_TX_LOG(DEBUG,
- "start bd: nbytes %d flags %x vlan %x\n",
+ "start bd: nbytes %d flags %x vlan %x",
tx_start_bd->nbytes,
tx_start_bd->bd_flags.as_bitfield,
tx_start_bd->vlan_or_ethertype);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 57093054..91c5aec2 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -18,6 +18,7 @@
#include <rte_byteorder.h>
#include <rte_spinlock.h>
+#include <rte_io.h>
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#ifndef __LITTLE_ENDIAN
@@ -1420,8 +1421,7 @@ bnx2x_reg_write8(struct bnx2x_softc *sc, size_t offset, uint8_t val)
{
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
- *((volatile uint8_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+ rte_write8(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
static inline void
@@ -1434,8 +1434,8 @@ bnx2x_reg_write16(struct bnx2x_softc *sc, size_t offset, uint16_t val)
#endif
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%04x",
(unsigned long)offset, val);
- *((volatile uint16_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+ rte_write16(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
+
}
static inline void
@@ -1449,8 +1449,7 @@ bnx2x_reg_write32(struct bnx2x_softc *sc, size_t offset, uint32_t val)
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
- *((volatile uint32_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)) = val;
+ rte_write32(val, ((uint8_t *)sc->bar[BAR0].base_addr + offset));
}
static inline uint8_t
@@ -1458,8 +1457,7 @@ bnx2x_reg_read8(struct bnx2x_softc *sc, size_t offset)
{
uint8_t val;
- val = (uint8_t)(*((volatile uint8_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ val = rte_read8((uint8_t *)sc->bar[BAR0].base_addr + offset);
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x",
(unsigned long)offset, val);
@@ -1477,8 +1475,7 @@ bnx2x_reg_read16(struct bnx2x_softc *sc, size_t offset)
(unsigned long)offset);
#endif
- val = (uint16_t)(*((volatile uint16_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ val = rte_read16(((uint8_t *)sc->bar[BAR0].base_addr + offset));
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
@@ -1496,8 +1493,7 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
(unsigned long)offset);
#endif
- val = (uint32_t)(*((volatile uint32_t*)
- ((uintptr_t)sc->bar[BAR0].base_addr + offset)));
+ val = rte_read32(((uint8_t *)sc->bar[BAR0].base_addr + offset));
PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x",
(unsigned long)offset, val);
@@ -1561,11 +1557,9 @@ bnx2x_reg_read32(struct bnx2x_softc *sc, size_t offset)
#define DPM_TRIGGER_TYPE 0x40
/* Doorbell macro */
-#define BNX2X_DB_WRITE(db_bar, val) \
- *((volatile uint32_t *)(db_bar)) = (val)
+#define BNX2X_DB_WRITE(db_bar, val) rte_write32_relaxed((val), (db_bar))
-#define BNX2X_DB_READ(db_bar) \
- *((volatile uint32_t *)(db_bar))
+#define BNX2X_DB_READ(db_bar) rte_read32_relaxed(db_bar)
#define DOORBELL_ADDR(sc, offset) \
(volatile uint32_t *)(((char *)(sc)->bar[BAR1].base_addr + (offset)))
@@ -1983,7 +1977,7 @@ bnx2x_set_rx_mode(struct bnx2x_softc *sc)
static inline int pci_read(struct bnx2x_softc *sc, size_t addr,
void *val, uint8_t size)
{
- if (rte_eal_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
+ if (rte_pci_read_config(sc->pci_dev, val, size, addr) <= 0) {
PMD_DRV_LOG(ERR, "Can't read from PCI config space");
return ENXIO;
}
@@ -1995,7 +1989,7 @@ static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
{
uint16_t val16 = val;
- if (rte_eal_pci_write_config(sc->pci_dev, &val16,
+ if (rte_pci_write_config(sc->pci_dev, &val16,
sizeof(val16), addr) <= 0) {
PMD_DRV_LOG(ERR, "Can't write to PCI config space");
return ENXIO;
@@ -2007,7 +2001,7 @@ static inline int pci_write_word(struct bnx2x_softc *sc, size_t addr, off_t val)
static inline int pci_write_long(struct bnx2x_softc *sc, size_t addr, off_t val)
{
uint32_t val32 = val;
- if (rte_eal_pci_write_config(sc->pci_dev, &val32,
+ if (rte_pci_write_config(sc->pci_dev, &val32,
sizeof(val32), addr) <= 0) {
PMD_DRV_LOG(ERR, "Can't write to PCI config space");
return ENXIO;
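The register accessors above are representative of the rte_io conversion applied throughout this series: open-coded volatile stores become rte_write8/16/32() and the loads become rte_read8/16/32(). A minimal sketch of the pattern, with REG_OFFSET and the helper names invented for illustration:

#include <stdint.h>
#include <rte_io.h>

#define REG_OFFSET 0x100	/* hypothetical register offset within BAR0 */

static inline void
set_reg(void *bar_base, uint32_t val)
{
	/* rte_write32() provides the I/O ordering the old volatile store
	 * relied on implicitly; rte_write32_relaxed() omits the barrier. */
	rte_write32(val, (uint8_t *)bar_base + REG_OFFSET);
}

static inline uint32_t
get_reg(void *bar_base)
{
	return rte_read32((uint8_t *)bar_base + REG_OFFSET);
}

bar_base would typically be pci_dev->mem_resource[0].addr once the BAR has been mapped by EAL.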
diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c
index a8aebbe3..b79cfdb0 100644
--- a/drivers/net/bnx2x/bnx2x_ethdev.c
+++ b/drivers/net/bnx2x/bnx2x_ethdev.c
@@ -12,12 +12,13 @@
#include "bnx2x_rxtx.h"
#include <rte_dev.h>
+#include <rte_ethdev_pci.h>
/*
* The set of PCI devices this driver supports
*/
#define BROADCOM_PCI_VENDOR_ID 0x14E4
-static struct rte_pci_id pci_id_bnx2x_map[] = {
+static const struct rte_pci_id pci_id_bnx2x_map[] = {
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
@@ -33,7 +34,7 @@ static struct rte_pci_id pci_id_bnx2x_map[] = {
{ .vendor_id = 0, }
};
-static struct rte_pci_id pci_id_bnx2xvf_map[] = {
+static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
@@ -119,12 +120,13 @@ bnx2x_interrupt_action(struct rte_eth_dev *dev)
}
static __rte_unused void
-bnx2x_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+bnx2x_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct bnx2x_softc *sc = dev->data->dev_private;
bnx2x_interrupt_action(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(&sc->pci_dev->intr_handle);
}
/*
@@ -187,10 +189,10 @@ bnx2x_dev_start(struct rte_eth_dev *dev)
}
if (IS_PF(sc)) {
- rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_register(&sc->pci_dev->intr_handle,
bnx2x_interrupt_handler, (void *)dev);
- if(rte_intr_enable(&(dev->pci_dev->intr_handle)))
+ if (rte_intr_enable(&sc->pci_dev->intr_handle))
PMD_DRV_LOG(ERR, "rte_intr_enable failed");
}
@@ -215,8 +217,8 @@ bnx2x_dev_stop(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
if (IS_PF(sc)) {
- rte_intr_disable(&(dev->pci_dev->intr_handle));
- rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ rte_intr_disable(&sc->pci_dev->intr_handle);
+ rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
bnx2x_interrupt_handler, (void *)dev);
}
@@ -440,6 +442,7 @@ static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_info *dev_info)
{
struct bnx2x_softc *sc = dev->data->dev_private;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = sc->max_rx_queues;
dev_info->max_tx_queues = sc->max_tx_queues;
dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
@@ -448,14 +451,17 @@ bnx2x_dev_infos_get(struct rte_eth_dev *dev, __rte_unused struct rte_eth_dev_inf
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
}
-static void
+static int
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- if (sc->mac_ops.mac_addr_add)
+ if (sc->mac_ops.mac_addr_add) {
sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
+ return 0;
+ }
+ return -ENOTSUP;
}
static void
@@ -525,7 +531,7 @@ bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
@@ -625,32 +631,57 @@ eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
return bnx2x_common_dev_init(eth_dev, 1);
}
-static struct eth_driver rte_bnx2x_pmd = {
- .pci_drv = {
- .id_table = pci_id_bnx2x_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_bnx2x_dev_init,
- .dev_private_size = sizeof(struct bnx2x_softc),
+static struct rte_pci_driver rte_bnx2x_pmd;
+static struct rte_pci_driver rte_bnx2xvf_pmd;
+
+static int eth_bnx2x_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev, sizeof(struct bnx2x_softc));
+ if (!eth_dev)
+ return -ENOMEM;
+
+ if (pci_drv == &rte_bnx2x_pmd)
+ ret = eth_bnx2x_dev_init(eth_dev);
+ else if (pci_drv == &rte_bnx2xvf_pmd)
+ ret = eth_bnx2xvf_dev_init(eth_dev);
+ else
+ ret = -EINVAL;
+
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int eth_bnx2x_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_bnx2x_pmd = {
+ .id_table = pci_id_bnx2x_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_bnx2x_pci_probe,
+ .remove = eth_bnx2x_pci_remove,
};
/*
* virtual function driver struct
*/
-static struct eth_driver rte_bnx2xvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_bnx2xvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_bnx2xvf_dev_init,
- .dev_private_size = sizeof(struct bnx2x_softc),
+static struct rte_pci_driver rte_bnx2xvf_pmd = {
+ .id_table = pci_id_bnx2xvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_bnx2x_pci_probe,
+ .remove = eth_bnx2x_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
-RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio");
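The interrupt-handler change above follows the new EAL callback prototype, which passes only the opaque argument; the handle itself now comes from the device's rte_pci_device. A hedged sketch of the registration pattern, with my_dev and my_handler as illustrative names:

#include <rte_interrupts.h>
#include <rte_pci.h>

struct my_dev {
	struct rte_pci_device *pci_dev;
};

static void
my_handler(void *param)
{
	struct my_dev *dev = param;

	/* service the interrupt for dev, then typically re-enable it */
	rte_intr_enable(&dev->pci_dev->intr_handle);
}

static int
my_dev_start(struct my_dev *dev)
{
	rte_intr_callback_register(&dev->pci_dev->intr_handle,
				   my_handler, dev);
	return rte_intr_enable(&dev->pci_dev->intr_handle);
}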
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c
index 170e48fb..5dd4aee7 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.c
+++ b/drivers/net/bnx2x/bnx2x_rxtx.c
@@ -19,7 +19,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
+ dev->device->driver->name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
@@ -273,6 +273,8 @@ bnx2x_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->tx_free_thresh = tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+ txq->tx_free_thresh = min(txq->tx_free_thresh,
+ txq->nb_tx_desc - BDS_PER_TX_PKT);
PMD_INIT_LOG(DEBUG, "fp[%02d] req_bd=%u, thresh=%u, usable_bd=%lu, "
"total_bd=%lu, tx_pages=%u",
diff --git a/drivers/net/bnx2x/bnx2x_rxtx.h b/drivers/net/bnx2x/bnx2x_rxtx.h
index dd251aaf..2e38ec26 100644
--- a/drivers/net/bnx2x/bnx2x_rxtx.h
+++ b/drivers/net/bnx2x/bnx2x_rxtx.h
@@ -11,7 +11,7 @@
#ifndef _BNX2X_RXTX_H_
#define _BNX2X_RXTX_H_
-#define DEFAULT_TX_FREE_THRESH 512
+#define DEFAULT_TX_FREE_THRESH 64
#define RTE_PMD_BNX2X_TX_MAX_BURST 1
/**
diff --git a/drivers/net/bnx2x/ecore_sp.c b/drivers/net/bnx2x/ecore_sp.c
index e6fecd88..22f2dc95 100644
--- a/drivers/net/bnx2x/ecore_sp.c
+++ b/drivers/net/bnx2x/ecore_sp.c
@@ -2725,7 +2725,7 @@ static int ecore_mcast_validate_e2(__rte_unused struct bnx2x_softc *sc,
/* DEL command deletes all currently configured MACs */
case ECORE_MCAST_CMD_DEL:
o->set_registry_size(o, 0);
- /* Don't break */
+ /* fall-through */
/* RESTORE command will restore the entire multicast configuration */
case ECORE_MCAST_CMD_RESTORE:
diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
index 53293962..9ffa7dc6 100644
--- a/drivers/net/bnx2x/elink.c
+++ b/drivers/net/bnx2x/elink.c
@@ -5898,6 +5898,7 @@ elink_status_t elink_set_led(struct elink_params *params,
*/
if (!vars->link_up)
break;
+ /* fall-through */
case ELINK_LED_MODE_ON:
if (((params->phy[ELINK_EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BNX2X8727) ||
@@ -11534,11 +11535,13 @@ static void elink_phy_def_cfg(struct elink_params *params,
switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
case PORT_FEATURE_LINK_SPEED_10M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall-through */
case PORT_FEATURE_LINK_SPEED_10M_FULL:
phy->req_line_speed = ELINK_SPEED_10;
break;
case PORT_FEATURE_LINK_SPEED_100M_HALF:
phy->req_duplex = DUPLEX_HALF;
+ /* fall-through */
case PORT_FEATURE_LINK_SPEED_100M_FULL:
phy->req_line_speed = ELINK_SPEED_100;
break;
diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile
index 65aaa929..0fffe356 100644
--- a/drivers/net/bnxt/Makefile
+++ b/drivers/net/bnxt/Makefile
@@ -66,10 +66,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_irq.c
#
SYMLINK-y-include +=
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_eal
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h
index f9f2adb4..83e53761 100644
--- a/drivers/net/bnxt/bnxt_cpr.h
+++ b/drivers/net/bnxt/bnxt_cpr.h
@@ -34,6 +34,8 @@
#ifndef _BNXT_CPR_H_
#define _BNXT_CPR_H_
+#include <rte_io.h>
+
#define CMP_VALID(cmp, raw_cons, ring) \
(!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \
!((raw_cons) & ((ring)->ring_size)))
@@ -50,13 +52,14 @@
#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define B_CP_DB_REARM(cpr, raw_cons) \
- (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \
- RING_CMP(cpr->cp_ring_struct, raw_cons)))
+ rte_write32((DB_CP_REARM_FLAGS | \
+ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+ ((cpr)->cp_doorbell))
#define B_CP_DIS_DB(cpr, raw_cons) \
- rte_smp_wmb(); \
- (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \
- RING_CMP(cpr->cp_ring_struct, raw_cons)))
+ rte_write32((DB_CP_FLAGS | \
+ RING_CMP(((cpr)->cp_ring_struct), raw_cons)), \
+ ((cpr)->cp_doorbell))
struct bnxt_ring;
struct bnxt_cp_ring_info {
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 035fe07a..bb873615 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -36,6 +36,7 @@
#include <rte_dev.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
@@ -59,25 +60,44 @@ static const char bnxt_version[] =
#define PCI_VENDOR_ID_BROADCOM 0x14E4
+#define BROADCOM_DEV_ID_STRATUS_NIC 0x1614
+#define BROADCOM_DEV_ID_57414_VF 0x16c1
#define BROADCOM_DEV_ID_57301 0x16c8
#define BROADCOM_DEV_ID_57302 0x16c9
#define BROADCOM_DEV_ID_57304_PF 0x16ca
#define BROADCOM_DEV_ID_57304_VF 0x16cb
+#define BROADCOM_DEV_ID_57417_MF 0x16cc
#define BROADCOM_DEV_ID_NS2 0x16cd
+#define BROADCOM_DEV_ID_57311 0x16ce
+#define BROADCOM_DEV_ID_57312 0x16cf
#define BROADCOM_DEV_ID_57402 0x16d0
#define BROADCOM_DEV_ID_57404 0x16d1
#define BROADCOM_DEV_ID_57406_PF 0x16d2
#define BROADCOM_DEV_ID_57406_VF 0x16d3
#define BROADCOM_DEV_ID_57402_MF 0x16d4
#define BROADCOM_DEV_ID_57407_RJ45 0x16d5
+#define BROADCOM_DEV_ID_57412 0x16d6
+#define BROADCOM_DEV_ID_57414 0x16d7
+#define BROADCOM_DEV_ID_57416_RJ45 0x16d8
+#define BROADCOM_DEV_ID_57417_RJ45 0x16d9
#define BROADCOM_DEV_ID_5741X_VF 0x16dc
+#define BROADCOM_DEV_ID_57412_MF 0x16de
+#define BROADCOM_DEV_ID_57314 0x16df
+#define BROADCOM_DEV_ID_57317_RJ45 0x16e0
#define BROADCOM_DEV_ID_5731X_VF 0x16e1
+#define BROADCOM_DEV_ID_57417_SFP 0x16e2
+#define BROADCOM_DEV_ID_57416_SFP 0x16e3
+#define BROADCOM_DEV_ID_57317_SFP 0x16e4
#define BROADCOM_DEV_ID_57404_MF 0x16e7
#define BROADCOM_DEV_ID_57406_MF 0x16e8
#define BROADCOM_DEV_ID_57407_SFP 0x16e9
#define BROADCOM_DEV_ID_57407_MF 0x16ea
+#define BROADCOM_DEV_ID_57414_MF 0x16ec
+#define BROADCOM_DEV_ID_57416_MF 0x16ee
-static struct rte_pci_id bnxt_pci_id_map[] = {
+static const struct rte_pci_id bnxt_pci_id_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_STRATUS_NIC) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57301) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57302) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57304_PF) },
@@ -95,6 +115,21 @@ static struct rte_pci_id bnxt_pci_id_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57407_MF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5741X_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_5731X_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57314) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57311) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57312) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57412_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_RJ45) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57417_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57317_SFP) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57414_MF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BROADCOM_DEV_ID_57416_MF) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -303,6 +338,8 @@ static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev,
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
uint16_t max_vnics, i, j, vpool, vrxq;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+
/* MAC Specifics */
dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR;
dev_info->max_hash_mac_addrs = 0;
@@ -581,9 +618,9 @@ static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
}
}
-static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool)
+static int bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]);
@@ -591,30 +628,30 @@ static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev,
if (BNXT_VF(bp)) {
RTE_LOG(ERR, PMD, "Cannot add MAC address to a VF interface\n");
- return;
+ return -ENOTSUP;
}
if (!vnic) {
RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool);
- return;
+ return -EINVAL;
}
/* Attach requested MAC address to the new l2_filter */
STAILQ_FOREACH(filter, &vnic->filter, next) {
if (filter->mac_index == index) {
RTE_LOG(ERR, PMD,
"MAC addr already existed for pool %d\n", pool);
- return;
+ return -EINVAL;
}
}
filter = bnxt_alloc_filter(bp);
if (!filter) {
RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
- return;
+ return -ENODEV;
}
STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
filter->mac_index = index;
memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
- bnxt_hwrm_set_filter(bp, vnic, filter);
+ return bnxt_hwrm_set_filter(bp, vnic, filter);
}
int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
@@ -743,6 +780,8 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ struct rte_intr_handle *intr_handle
+ = &bp->pdev->intr_handle;
/* Retrieve from the default VNIC */
if (!vnic)
@@ -759,7 +798,7 @@ static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev,
/* EW - need to revisit here copying from u64 to u16 */
memcpy(reta_conf, vnic->rss_table, reta_size);
- if (rte_intr_allow_others(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_allow_others(intr_handle)) {
if (eth_dev->data->dev_conf.intr_conf.lsc != 0)
bnxt_dev_lsc_intr_setup(eth_dev);
}
@@ -968,7 +1007,7 @@ static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev,
* Initialization
*/
-static struct eth_dev_ops bnxt_dev_ops = {
+static const struct eth_dev_ops bnxt_dev_ops = {
.dev_infos_get = bnxt_dev_info_get_op,
.dev_close = bnxt_dev_close_op,
.dev_configure = bnxt_dev_configure_op,
@@ -1009,11 +1048,12 @@ static bool bnxt_vf_pciid(uint16_t id)
static int bnxt_init_board(struct rte_eth_dev *eth_dev)
{
- int rc;
struct bnxt *bp = eth_dev->data->dev_private;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ int rc;
/* enable device (incl. PCI PM wakeup), and bus-mastering */
- if (!eth_dev->pci_dev->mem_resource[0].addr) {
+ if (!pci_dev->mem_resource[0].addr) {
RTE_LOG(ERR, PMD,
"Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
@@ -1021,9 +1061,9 @@ static int bnxt_init_board(struct rte_eth_dev *eth_dev)
}
bp->eth_dev = eth_dev;
- bp->pdev = eth_dev->pci_dev;
+ bp->pdev = pci_dev;
- bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ bp->bar0 = (void *)pci_dev->mem_resource[0].addr;
if (!bp->bar0) {
RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n");
rc = -ENOMEM;
@@ -1040,9 +1080,12 @@ init_err_disable:
return rc;
}
+static int bnxt_dev_uninit(struct rte_eth_dev *eth_dev);
+
static int
bnxt_dev_init(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
static int version_printed;
struct bnxt *bp;
int rc;
@@ -1050,10 +1093,12 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
if (version_printed++ == 0)
RTE_LOG(INFO, PMD, "%s", bnxt_version);
- rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
bp = eth_dev->data->dev_private;
- if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id))
+ if (bnxt_vf_pciid(pci_dev->id.device_id))
bp->flags |= BNXT_FLAG_VF;
rc = bnxt_init_board(eth_dev);
@@ -1121,15 +1166,15 @@ bnxt_dev_init(struct rte_eth_dev *eth_dev)
RTE_LOG(INFO, PMD,
DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
- eth_dev->pci_dev->mem_resource[0].phys_addr,
- eth_dev->pci_dev->mem_resource[0].addr);
+ pci_dev->mem_resource[0].phys_addr,
+ pci_dev->mem_resource[0].addr);
bp->dev_stopped = 0;
return 0;
error_free:
- eth_dev->driver->eth_dev_uninit(eth_dev);
+ bnxt_dev_uninit(eth_dev);
error:
return rc;
}
@@ -1158,18 +1203,26 @@ bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
return rc;
}
-static struct eth_driver bnxt_rte_pmd = {
- .pci_drv = {
- .id_table = bnxt_pci_id_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
- RTE_PCI_DRV_DETACHABLE | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove
- },
- .eth_dev_init = bnxt_dev_init,
- .eth_dev_uninit = bnxt_dev_uninit,
- .dev_private_size = sizeof(struct bnxt),
+static int bnxt_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct bnxt),
+ bnxt_dev_init);
+}
+
+static int bnxt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, bnxt_dev_uninit);
+}
+
+static struct rte_pci_driver bnxt_rte_pmd = {
+ .id_table = bnxt_pci_id_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_INTR_LSC,
+ .probe = bnxt_pci_probe,
+ .remove = bnxt_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 07e71241..3849d1a6 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -50,6 +50,8 @@
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
+#include <rte_io.h>
+
#define HWRM_CMD_TIMEOUT 2000
/*
@@ -72,19 +74,19 @@ static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
/* Write request msg to hwrm channel */
for (i = 0; i < msg_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
- *(volatile uint32_t *)bar = *data;
+ rte_write32(*data, bar);
data++;
}
/* Zero the rest of the request space */
for (; i < bp->max_req_len; i += 4) {
bar = (uint8_t *)bp->bar0 + i;
- *(volatile uint32_t *)bar = 0;
+ rte_write32(0, bar);
}
/* Ring channel doorbell */
bar = (uint8_t *)bp->bar0 + 0x100;
- *(volatile uint32_t *)bar = 1;
+ rte_write32(1, bar);
/* Poll for the valid bit */
for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
diff --git a/drivers/net/bnxt/bnxt_irq.c b/drivers/net/bnxt/bnxt_irq.c
index e93585a0..20e17ff5 100644
--- a/drivers/net/bnxt/bnxt_irq.c
+++ b/drivers/net/bnxt/bnxt_irq.c
@@ -45,8 +45,7 @@
* Interrupts
*/
-static void bnxt_int_handler(struct rte_intr_handle *handle __rte_unused,
- void *param)
+static void bnxt_int_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 3f81ffcc..0fafa13f 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -209,6 +209,7 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
*/
int bnxt_alloc_hwrm_rings(struct bnxt *bp)
{
+ struct rte_pci_device *pci_dev = bp->pdev;
unsigned int i;
int rc = 0;
@@ -222,8 +223,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
0, HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
- cpr->cp_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr;
+ cpr->cp_doorbell = pci_dev->mem_resource[2].addr;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id;
}
@@ -242,8 +242,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
idx, HWRM_NA_SIGNATURE);
if (rc)
goto err_out;
- cpr->cp_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
@@ -255,8 +254,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
rxr->rx_prod = 0;
- rxr->rx_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ rxr->rx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id;
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -283,8 +281,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- cpr->cp_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ cpr->cp_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
@@ -296,8 +293,7 @@ int bnxt_alloc_hwrm_rings(struct bnxt *bp)
if (rc)
goto err_out;
- txr->tx_doorbell =
- (char *)bp->eth_dev->pci_dev->mem_resource[2].addr +
+ txr->tx_doorbell = (char *)pci_dev->mem_resource[2].addr +
idx * 0x80;
}
diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c
index 8bf8fee3..0d15bb1e 100644
--- a/drivers/net/bnxt/bnxt_txr.c
+++ b/drivers/net/bnxt/bnxt_txr.c
@@ -298,7 +298,7 @@ static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
nb_tx_pkts++;
else
- RTE_LOG(DEBUG, PMD,
+ RTE_LOG_DP(DEBUG, PMD,
"Unhandled CMP type %02x\n",
CMP_TYPE(txcmp));
raw_cons = NEXT_RAW_CMP(raw_cons);
diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h
index 4c16101b..5b097114 100644
--- a/drivers/net/bnxt/bnxt_txr.h
+++ b/drivers/net/bnxt/bnxt_txr.h
@@ -34,12 +34,12 @@
#ifndef _BNXT_TXR_H_
#define _BNXT_TXR_H_
+#include <rte_io.h>
+
#define MAX_TX_RINGS 16
#define BNXT_TX_PUSH_THRESH 92
-#define B_TX_DB(db, prod) \
- rte_smp_wmb(); \
- (*(uint32_t *)db = (DB_KEY_TX | prod))
+#define B_TX_DB(db, prod) rte_write32((DB_KEY_TX | (prod)), db)
struct bnxt_tx_ring_info {
uint16_t tx_prod;
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 504f2e8b..910c932d 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -58,13 +58,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += rte_eth_bond_alb.c
SYMLINK-y-include += rte_eth_bond.h
SYMLINK-y-include += rte_eth_bond_8023ad.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_cmdline
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ring
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c
index 2f7ae70c..7b863d6e 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.c
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.c
@@ -888,8 +888,8 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
RTE_ASSERT(port->tx_ring == NULL);
socket_id = rte_eth_devices[slave_id].data->numa_node;
- element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf)
- + RTE_PKTMBUF_HEADROOM;
+ element_size = sizeof(struct slow_protocol_frame) +
+ RTE_PKTMBUF_HEADROOM;
/* The size of the mempool should be at least:
* the sum of the TX descriptors + BOND_MODE_8023AX_SLAVE_TX_PKTS */
@@ -900,11 +900,10 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id)
}
snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id);
- port->mbuf_pool = rte_mempool_create(mem_name,
- total_tx_desc, element_size,
- RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
- sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
- NULL, rte_pktmbuf_init, NULL, socket_id, MEMPOOL_F_NO_SPREAD);
+ port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc,
+ RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ?
+ 32 : RTE_MEMPOOL_CACHE_MAX_SIZE,
+ 0, element_size, socket_id);
 /* Any memory allocation failure in initialization is critical because
 * resources can't be freed, so reinitialization is impossible. */
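The hunk above replaces the raw rte_mempool_create() call with the rte_pktmbuf_pool_create() helper, which supplies the mbuf constructors itself. A minimal sketch of the helper's parameters (values here are illustrative, not the bonding driver's exact sizing):

#include <rte_mbuf.h>

static struct rte_mempool *
make_slave_pool(const char *name, unsigned int n_mbufs, int socket_id)
{
	return rte_pktmbuf_pool_create(name, n_mbufs,
				       32,	/* per-lcore cache size */
				       0,	/* no per-mbuf private area */
				       RTE_MBUF_DEFAULT_BUF_SIZE,
				       socket_id);
}

In the bonding case the data room is sized from sizeof(struct slow_protocol_frame) plus RTE_PKTMBUF_HEADROOM, as computed a few lines earlier.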
diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c
index 2a3893a1..36ec65d6 100644
--- a/drivers/net/bonding/rte_eth_bond_api.c
+++ b/drivers/net/bonding/rte_eth_bond_api.c
@@ -37,15 +37,13 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_tcp.h>
+#include <rte_vdev.h>
+#include <rte_kvargs.h>
#include "rte_eth_bond.h"
#include "rte_eth_bond_private.h"
#include "rte_eth_bond_8023ad_private.h"
-#define DEFAULT_POLLING_INTERVAL_10_MS (10)
-
-const char pmd_bond_driver_name[] = "rte_bond_pmd";
-
int
check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
@@ -54,7 +52,7 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
return -1;
/* return 0 if driver name matches */
- return eth_dev->data->drv_name != pmd_bond_driver_name;
+ return eth_dev->data->drv_name != pmd_bond_drv.driver.name;
}
int
@@ -164,172 +162,45 @@ number_of_sockets(void)
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
- struct bond_dev_private *internals = NULL;
- struct rte_eth_dev *eth_dev = NULL;
- uint32_t vlan_filter_bmp_size;
-
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
- */
+ struct bond_dev_private *internals;
+ char devargs[52];
+ uint8_t port_id;
+ int ret;
if (name == NULL) {
RTE_BOND_LOG(ERR, "Invalid name specified");
- goto err;
- }
-
- if (socket_id >= number_of_sockets()) {
- RTE_BOND_LOG(ERR,
- "Invalid socket id specified to create bonded device on.");
- goto err;
- }
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
- if (internals == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
- goto err;
- }
-
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL) {
- RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
- goto err;
- }
-
- eth_dev->data->dev_private = internals;
- eth_dev->data->nb_rx_queues = (uint16_t)1;
- eth_dev->data->nb_tx_queues = (uint16_t)1;
-
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
-
- eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-
- eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
- socket_id);
- if (eth_dev->data->mac_addrs == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
- goto err;
+ return -EINVAL;
}
- eth_dev->data->dev_started = 0;
- eth_dev->data->promiscuous = 0;
- eth_dev->data->scattered_rx = 0;
- eth_dev->data->all_multicast = 0;
-
- eth_dev->dev_ops = &default_dev_ops;
- eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
- RTE_ETH_DEV_DETACHABLE;
- eth_dev->driver = NULL;
- eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->drv_name = pmd_bond_driver_name;
- eth_dev->data->numa_node = socket_id;
-
- rte_spinlock_init(&internals->lock);
-
- internals->port_id = eth_dev->data->port_id;
- internals->mode = BONDING_MODE_INVALID;
- internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
- internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
- internals->xmit_hash = xmit_l2_hash;
- internals->user_defined_mac = 0;
- internals->link_props_set = 0;
-
- internals->link_status_polling_enabled = 0;
-
- internals->link_status_polling_interval_ms = DEFAULT_POLLING_INTERVAL_10_MS;
- internals->link_down_delay_ms = 0;
- internals->link_up_delay_ms = 0;
-
- internals->slave_count = 0;
- internals->active_slave_count = 0;
- internals->rx_offload_capa = 0;
- internals->tx_offload_capa = 0;
- internals->candidate_max_rx_pktlen = 0;
- internals->max_rx_pktlen = 0;
+ ret = snprintf(devargs, sizeof(devargs),
+ "driver=net_bonding,mode=%d,socket_id=%d", mode, socket_id);
+ if (ret < 0 || ret >= (int)sizeof(devargs))
+ return -ENOMEM;
- /* Initially allow to choose any offload type */
- internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ ret = rte_vdev_init(name, devargs);
+ if (ret)
+ return -ENOMEM;
- memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
- memset(internals->slaves, 0, sizeof(internals->slaves));
+ ret = rte_eth_dev_get_port_by_name(name, &port_id);
+ RTE_ASSERT(!ret);
- /* Set mode 4 default configuration */
- bond_mode_8023ad_setup(eth_dev, NULL);
- if (bond_ethdev_mode_set(eth_dev, mode)) {
- RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode too %d",
- eth_dev->data->port_id, mode);
- goto err;
- }
-
- vlan_filter_bmp_size =
- rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
- internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
- RTE_CACHE_LINE_SIZE);
- if (internals->vlan_filter_bmpmem == NULL) {
- RTE_BOND_LOG(ERR,
- "Failed to allocate vlan bitmap for bonded device %u\n",
- eth_dev->data->port_id);
- goto err;
- }
-
- internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
- internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
- if (internals->vlan_filter_bmp == NULL) {
- RTE_BOND_LOG(ERR,
- "Failed to init vlan bitmap for bonded device %u\n",
- eth_dev->data->port_id);
- rte_free(internals->vlan_filter_bmpmem);
- goto err;
- }
-
- return eth_dev->data->port_id;
+ /*
+ * To make bond_ethdev_configure() happy we need to free the
+ * internals->kvlist here.
+ *
+ * Also see comment in bond_ethdev_configure().
+ */
+ internals = rte_eth_devices[port_id].data->dev_private;
+ rte_kvargs_free(internals->kvlist);
+ internals->kvlist = NULL;
-err:
- rte_free(internals);
- if (eth_dev != NULL) {
- rte_free(eth_dev->data->mac_addrs);
- rte_eth_dev_release_port(eth_dev);
- }
- return -1;
+ return port_id;
}
int
rte_eth_bond_free(const char *name)
{
- struct rte_eth_dev *eth_dev = NULL;
- struct bond_dev_private *internals;
-
- /* now free all data allocation - for eth_dev structure,
- * dummy pci driver and internal (private) data
- */
-
- /* find an ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
- if (eth_dev == NULL)
- return -ENODEV;
-
- internals = eth_dev->data->dev_private;
- if (internals->slave_count != 0)
- return -EBUSY;
-
- if (eth_dev->data->dev_started == 1) {
- bond_ethdev_stop(eth_dev);
- bond_ethdev_close(eth_dev);
- }
-
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
- internals = eth_dev->data->dev_private;
- rte_bitmap_free(internals->vlan_filter_bmp);
- rte_free(internals->vlan_filter_bmpmem);
- rte_free(eth_dev->data->dev_private);
- rte_free(eth_dev->data->mac_addrs);
-
- rte_eth_dev_release_port(eth_dev);
-
- return 0;
+ return rte_vdev_uninit(name);
}
static int
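With rte_eth_bond_create() now a thin wrapper around rte_vdev_init(), an application can create a bonded port through either path. A short sketch, assuming EAL is initialized and using illustrative names:

#include <rte_eth_bond.h>
#include <rte_vdev.h>

static int
make_bond(void)
{
	/* wrapper path: builds "driver=net_bonding,mode=...,socket_id=..." */
	int port = rte_eth_bond_create("net_bonding0",
				       BONDING_MODE_ACTIVE_BACKUP, 0);

	/* equivalent direct path:
	 * rte_vdev_init("net_bonding0",
	 *		 "driver=net_bonding,mode=1,socket_id=0");
	 */
	return port;	/* port id on success, negative errno on failure */
}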
diff --git a/drivers/net/bonding/rte_eth_bond_args.c b/drivers/net/bonding/rte_eth_bond_args.c
index 02ecde64..e3bdad9d 100644
--- a/drivers/net/bonding/rte_eth_bond_args.c
+++ b/drivers/net/bonding/rte_eth_bond_args.c
@@ -47,22 +47,30 @@ const char *pmd_bond_init_valid_arguments[] = {
PMD_BOND_XMIT_POLICY_KVARG,
PMD_BOND_SOCKET_ID_KVARG,
PMD_BOND_MAC_ADDR_KVARG,
-
+ "driver",
NULL
};
static inline int
find_port_id_by_pci_addr(const struct rte_pci_addr *pci_addr)
{
+ struct rte_pci_device *pci_dev;
struct rte_pci_addr *eth_pci_addr;
unsigned i;
for (i = 0; i < rte_eth_dev_count(); i++) {
- if (rte_eth_devices[i].pci_dev == NULL)
+ /* Currently populated by rte_eth_copy_pci_info().
+ *
+ * TODO: Once the PCI bus has arrived we should have a better
+ * way to test for being a PCI device or not.
+ */
+ if (rte_eth_devices[i].data->kdrv == RTE_KDRV_UNKNOWN ||
+ rte_eth_devices[i].data->kdrv == RTE_KDRV_NONE)
continue;
- eth_pci_addr = &(rte_eth_devices[i].pci_dev->addr);
+ pci_dev = RTE_DEV_TO_PCI(rte_eth_devices[i].device);
+ eth_pci_addr = &pci_dev->addr;
if (pci_addr->bus == eth_pci_addr->bus &&
pci_addr->devid == eth_pci_addr->devid &&
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index a80b6fa9..82959abc 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,6 +36,7 @@
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
@@ -51,6 +52,7 @@
#include "rte_eth_bond_8023ad_private.h"
#define REORDER_PERIOD_MS 10
+#define DEFAULT_POLLING_INTERVAL_10_MS (10)
#define HASH_L4_PORTS(h) ((h)->src_port ^ (h)->dst_port)
@@ -145,7 +147,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
uint16_t num_rx_total = 0; /* Total number of received packets */
uint8_t slaves[RTE_MAX_ETHPORTS];
- uint8_t slave_count;
+ uint8_t slave_count, idx;
uint8_t collecting; /* current slave collecting status */
const uint8_t promisc = internals->promiscuous_en;
@@ -159,12 +161,18 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
memcpy(slaves, internals->active_slaves,
sizeof(internals->active_slaves[0]) * slave_count);
+ idx = internals->active_slave;
+ if (idx >= slave_count) {
+ internals->active_slave = 0;
+ idx = 0;
+ }
for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
j = num_rx_total;
- collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[i]], COLLECTING);
+ collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+ COLLECTING);
/* Read packets from this slave */
- num_rx_total += rte_eth_rx_burst(slaves[i], bd_rx_q->queue_id,
+ num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
&bufs[num_rx_total], nb_pkts - num_rx_total);
for (k = j; k < 2 && k < num_rx_total; k++)
@@ -187,8 +195,8 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
if (hdr->ether_type == ether_type_slow_be) {
- bond_mode_8023ad_handle_slow_pkt(internals, slaves[i],
- bufs[j]);
+ bond_mode_8023ad_handle_slow_pkt(
+ internals, slaves[idx], bufs[j]);
} else
rte_pktmbuf_free(bufs[j]);
@@ -201,8 +209,11 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
} else
j++;
}
+ if (unlikely(++idx == slave_count))
+ idx = 0;
}
+ internals->active_slave = idx;
return num_rx_total;
}
@@ -900,7 +911,6 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
num_tx_total += num_send;
- num_not_send += slave_bufs_pkts[RTE_MAX_ETHPORTS] - num_send;
}
return num_tx_total;
@@ -1009,7 +1019,8 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
struct port *port = &mode_8023ad_ports[slaves[i]];
slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring,
- slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS);
+ slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS,
+ NULL);
slave_nb_pkts[i] = slave_slow_nb_pkts[i];
for (j = 0; j < slave_slow_nb_pkts[i]; j++)
@@ -1317,8 +1328,6 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
struct bond_rx_queue *bd_rx_q;
struct bond_tx_queue *bd_tx_q;
- uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
- uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
int errval;
uint16_t q_id;
@@ -1362,9 +1371,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* Setup Rx Queues */
- /* Use existing queues, if any */
- for (q_id = old_nb_rx_queues;
- q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
@@ -1380,9 +1387,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* Setup Tx Queues */
- /* Use existing queues, if any */
- for (q_id = old_nb_tx_queues;
- q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
+ for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
@@ -1430,9 +1435,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
}
/* If lsc interrupt is set, check initial slave's link status */
- if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ slave_eth_dev->dev_ops->link_update(slave_eth_dev, 0);
bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+ }
return 0;
}
@@ -1454,6 +1461,9 @@ slave_remove(struct bond_dev_private *internals,
(internals->slave_count - i - 1));
internals->slave_count--;
+
+ /* force reconfiguration of slave interfaces */
+ _rte_eth_dev_reset(slave_eth_dev);
}
static void
@@ -1653,7 +1663,22 @@ void
bond_ethdev_close(struct rte_eth_dev *dev)
{
struct bond_dev_private *internals = dev->data->dev_private;
+ uint8_t bond_port_id = internals->port_id;
+ int skipped = 0;
+
+ RTE_LOG(INFO, EAL, "Closing bonded device %s\n", dev->data->name);
+ while (internals->slave_count != skipped) {
+ uint8_t port_id = internals->slaves[skipped].port_id;
+
+ rte_eth_dev_stop(port_id);
+ if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
+ RTE_LOG(ERR, EAL,
+ "Failed to remove port %d from bonded device "
+ "%s\n", port_id, dev->data->name);
+ skipped++;
+ }
+ }
bond_ethdev_free_queues(dev);
rte_bitmap_reset(internals->vlan_filter_bmp);
}
@@ -1668,14 +1693,14 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_mac_addrs = 1;
- dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ?
- internals->candidate_max_rx_pktlen : 2048;
+ dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen
+ ? internals->candidate_max_rx_pktlen
+ : ETHER_MAX_JUMBO_FRAME_LEN;
dev_info->max_rx_queues = (uint16_t)128;
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->rx_offload_capa = internals->rx_offload_capa;
dev_info->tx_offload_capa = internals->tx_offload_capa;
@@ -2235,16 +2260,133 @@ const struct eth_dev_ops default_dev_ops = {
};
static int
-bond_probe(const char *name, const char *params)
+bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
+{
+ const char *name = rte_vdev_device_name(dev);
+ uint8_t socket_id = dev->device.numa_node;
+ struct bond_dev_private *internals = NULL;
+ struct rte_eth_dev *eth_dev = NULL;
+ uint32_t vlan_filter_bmp_size;
+
+ /* now do all data allocation - for eth_dev structure, dummy pci driver
+ * and internal (private) data
+ */
+
+ if (socket_id >= number_of_sockets()) {
+ RTE_BOND_LOG(ERR,
+ "Invalid socket id specified to create bonded device on.");
+ goto err;
+ }
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
+ if (eth_dev == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to allocate rte_eth_dev");
+ goto err;
+ }
+
+ internals = eth_dev->data->dev_private;
+ eth_dev->data->nb_rx_queues = (uint16_t)1;
+ eth_dev->data->nb_tx_queues = (uint16_t)1;
+
+ eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
+ socket_id);
+ if (eth_dev->data->mac_addrs == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
+ goto err;
+ }
+
+ eth_dev->dev_ops = &default_dev_ops;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_DETACHABLE;
+
+ rte_spinlock_init(&internals->lock);
+
+ internals->port_id = eth_dev->data->port_id;
+ internals->mode = BONDING_MODE_INVALID;
+ internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
+ internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
+ internals->xmit_hash = xmit_l2_hash;
+ internals->user_defined_mac = 0;
+ internals->link_props_set = 0;
+
+ internals->link_status_polling_enabled = 0;
+
+ internals->link_status_polling_interval_ms =
+ DEFAULT_POLLING_INTERVAL_10_MS;
+ internals->link_down_delay_ms = 0;
+ internals->link_up_delay_ms = 0;
+
+ internals->slave_count = 0;
+ internals->active_slave_count = 0;
+ internals->rx_offload_capa = 0;
+ internals->tx_offload_capa = 0;
+ internals->candidate_max_rx_pktlen = 0;
+ internals->max_rx_pktlen = 0;
+
+ /* Initially allow to choose any offload type */
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+
+ memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
+ memset(internals->slaves, 0, sizeof(internals->slaves));
+
+ /* Set mode 4 default configuration */
+ bond_mode_8023ad_setup(eth_dev, NULL);
+ if (bond_ethdev_mode_set(eth_dev, mode)) {
+ RTE_BOND_LOG(ERR, "Failed to set bonded device %d mode to %d",
+ eth_dev->data->port_id, mode);
+ goto err;
+ }
+
+ vlan_filter_bmp_size =
+ rte_bitmap_get_memory_footprint(ETHER_MAX_VLAN_ID + 1);
+ internals->vlan_filter_bmpmem = rte_malloc(name, vlan_filter_bmp_size,
+ RTE_CACHE_LINE_SIZE);
+ if (internals->vlan_filter_bmpmem == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to allocate vlan bitmap for bonded device %u\n",
+ eth_dev->data->port_id);
+ goto err;
+ }
+
+ internals->vlan_filter_bmp = rte_bitmap_init(ETHER_MAX_VLAN_ID + 1,
+ internals->vlan_filter_bmpmem, vlan_filter_bmp_size);
+ if (internals->vlan_filter_bmp == NULL) {
+ RTE_BOND_LOG(ERR,
+ "Failed to init vlan bitmap for bonded device %u\n",
+ eth_dev->data->port_id);
+ rte_free(internals->vlan_filter_bmpmem);
+ goto err;
+ }
+
+ return eth_dev->data->port_id;
+
+err:
+ rte_free(internals);
+ if (eth_dev != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ rte_eth_dev_release_port(eth_dev);
+ }
+ return -1;
+}
+
+static int
+bond_probe(struct rte_vdev_device *dev)
{
+ const char *name;
struct bond_dev_private *internals;
struct rte_kvargs *kvlist;
uint8_t bonding_mode, socket_id;
int arg_count, port_id;
+ if (!dev)
+ return -EINVAL;
+
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, EAL, "Initializing pmd_bond for %s\n", name);
- kvlist = rte_kvargs_parse(params, pmd_bond_init_valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev),
+ pmd_bond_init_valid_arguments);
if (kvlist == NULL)
return -1;
@@ -2281,8 +2423,10 @@ bond_probe(const char *name, const char *params)
socket_id = rte_socket_id();
}
+ dev->device.numa_node = socket_id;
+
/* Create link bonding eth device */
- port_id = rte_eth_bond_create(name, bonding_mode, socket_id);
+ port_id = bond_alloc(dev, bonding_mode);
if (port_id < 0) {
RTE_LOG(ERR, EAL, "Failed to create socket %s in mode %u on "
"socket %u.\n", name, bonding_mode, socket_id);
@@ -2302,21 +2446,51 @@ parse_error:
}
static int
-bond_remove(const char *name)
+bond_remove(struct rte_vdev_device *dev)
{
- int ret;
+ struct rte_eth_dev *eth_dev;
+ struct bond_dev_private *internals;
+ const char *name;
- if (name == NULL)
+ if (!dev)
return -EINVAL;
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
- /* free link bonding eth device */
- ret = rte_eth_bond_free(name);
- if (ret < 0)
- RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
+ /* now free all data allocation - for eth_dev structure,
+ * dummy pci driver and internal (private) data
+ */
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ RTE_ASSERT(eth_dev->device == &dev->device);
- return ret;
+ internals = eth_dev->data->dev_private;
+ if (internals->slave_count != 0)
+ return -EBUSY;
+
+ if (eth_dev->data->dev_started == 1) {
+ bond_ethdev_stop(eth_dev);
+ bond_ethdev_close(eth_dev);
+ }
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ internals = eth_dev->data->dev_private;
+ rte_bitmap_free(internals->vlan_filter_bmp);
+ rte_free(internals->vlan_filter_bmpmem);
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data->mac_addrs);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
}
/* this part will resolve the slave portids after all the other pdev and vdev
@@ -2566,12 +2740,12 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
return 0;
}
-static struct rte_vdev_driver bond_drv = {
+struct rte_vdev_driver pmd_bond_drv = {
.probe = bond_probe,
.remove = bond_remove,
};
-RTE_PMD_REGISTER_VDEV(net_bonding, bond_drv);
+RTE_PMD_REGISTER_VDEV(net_bonding, pmd_bond_drv);
RTE_PMD_REGISTER_ALIAS(net_bonding, eth_bond);
RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h
index d95d440b..c8db0900 100644
--- a/drivers/net/bonding/rte_eth_bond_private.h
+++ b/drivers/net/bonding/rte_eth_bond_private.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,7 +63,7 @@
extern const char *pmd_bond_init_valid_arguments[];
-extern const char pmd_bond_driver_name[];
+extern struct rte_vdev_driver pmd_bond_drv;
/** Port Queue Mapping Structure */
struct bond_rx_queue {
@@ -144,6 +144,7 @@ struct bond_dev_private {
uint16_t nb_rx_queues; /**< Total number of rx queues */
uint16_t nb_tx_queues; /**< Total number of tx queues*/
+ uint8_t active_slave; /**< Next active_slave to poll */
uint8_t active_slave_count; /**< Number of active slaves */
uint8_t active_slaves[RTE_MAX_ETHPORTS]; /**< Active slave list */
diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile
index bfcc3159..7cef6279 100644
--- a/drivers/net/cxgbe/Makefile
+++ b/drivers/net/cxgbe/Makefile
@@ -81,9 +81,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe_main.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += sge.c
SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 5e3bd509..26807900 100644
--- a/drivers/net/cxgbe/base/adapter.h
+++ b/drivers/net/cxgbe/base/adapter.h
@@ -37,6 +37,7 @@
#define __T4_ADAPTER_H__
#include <rte_mbuf.h>
+#include <rte_io.h>
#include "cxgbe_compat.h"
#include "t4_regs_values.h"
@@ -324,7 +325,7 @@ struct adapter {
int use_unpacked_mode; /* unpacked rx mode state */
};
-#define CXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define CXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint64_t cxgbe_read_addr64(volatile void *addr)
{
@@ -350,16 +351,21 @@ static inline uint32_t cxgbe_read_addr(volatile void *addr)
#define CXGBE_READ_REG64(adap, reg) \
cxgbe_read_addr64(CXGBE_PCI_REG_ADDR((adap), (reg)))
-#define CXGBE_PCI_REG_WRITE(reg, value) ({ \
- CXGBE_PCI_REG((reg)) = (value); })
+#define CXGBE_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
+
+#define CXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((value), (reg))
#define CXGBE_WRITE_REG(adap, reg, value) \
CXGBE_PCI_REG_WRITE(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+#define CXGBE_WRITE_REG_RELAXED(adap, reg, value) \
+ CXGBE_PCI_REG_WRITE_RELAXED(CXGBE_PCI_REG_ADDR((adap), (reg)), (value))
+
static inline uint64_t cxgbe_write_addr64(volatile void *addr, uint64_t val)
{
- CXGBE_PCI_REG(addr) = val;
- CXGBE_PCI_REG(((volatile uint8_t *)(addr) + 4)) = (val >> 32);
+ CXGBE_PCI_REG_WRITE(addr, val);
+ CXGBE_PCI_REG_WRITE(((volatile uint8_t *)(addr) + 4), (val >> 32));
return val;
}
@@ -383,7 +389,7 @@ static inline u32 t4_read_reg(struct adapter *adapter, u32 reg_addr)
}
/**
- * t4_write_reg - write a HW register
+ * t4_write_reg - write a HW register with barrier
* @adapter: the adapter
* @reg_addr: the register address
* @val: the value to write
@@ -398,6 +404,22 @@ static inline void t4_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
}
/**
+ * t4_write_reg_relaxed - write a HW register with no barrier
+ * @adapter: the adapter
+ * @reg_addr: the register address
+ * @val: the value to write
+ *
+ * Write a 32-bit value into the given HW register.
+ */
+static inline void t4_write_reg_relaxed(struct adapter *adapter, u32 reg_addr,
+ u32 val)
+{
+ CXGBE_DEBUG_REG(adapter, "setting register 0x%x to 0x%x\n", reg_addr,
+ val);
+ CXGBE_WRITE_REG_RELAXED(adapter, reg_addr, val);
+}
+
+/**
* t4_read_reg64 - read a 64-bit HW register
* @adapter: the adapter
* @reg_addr: the register address
@@ -456,7 +478,7 @@ static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
{
u32 val32 = val;
- if (rte_eal_pci_write_config(adapter->pdev, &val32, sizeof(val32),
+ if (rte_pci_write_config(adapter->pdev, &val32, sizeof(val32),
addr) < 0)
dev_err(adapter, "Can't write to PCI config space\n");
}
@@ -472,7 +494,7 @@ static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr,
static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr,
u32 *val)
{
- if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
addr) < 0)
dev_err(adapter, "Can't read from PCI config space\n");
}
@@ -490,7 +512,7 @@ static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
{
u16 val16 = val;
- if (rte_eal_pci_write_config(adapter->pdev, &val16, sizeof(val16),
+ if (rte_pci_write_config(adapter->pdev, &val16, sizeof(val16),
addr) < 0)
dev_err(adapter, "Can't write to PCI config space\n");
}
@@ -506,7 +528,7 @@ static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr,
static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
u16 *val)
{
- if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
addr) < 0)
dev_err(adapter, "Can't read from PCI config space\n");
}
@@ -522,7 +544,7 @@ static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr,
static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr,
u8 *val)
{
- if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val),
+ if (rte_pci_read_config(adapter->pdev, val, sizeof(*val),
addr) < 0)
dev_err(adapter, "Can't read from PCI config space\n");
}
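
The CXGBE_PCI_REG* macros above now go through DPDK's rte_io layer instead of dereferencing volatile pointers directly: rte_read32()/rte_write32() carry the I/O barriers, while the new *_RELAXED macros map to rte_write32_relaxed() and leave ordering to the caller. A minimal sketch of the two flavours, using a hypothetical BAR pointer and register offset rather than the driver's own helpers:

#include <stdint.h>
#include <rte_io.h>

/* "bar" and "off" are illustrative, not taken from the cxgbe driver. */
static inline uint32_t sketch_read_reg(volatile void *bar, uint32_t off)
{
	/* rte_read32() includes the I/O read barrier */
	return rte_read32((volatile uint8_t *)bar + off);
}

static inline void sketch_write_reg(volatile void *bar, uint32_t off,
				    uint32_t val)
{
	/* rte_write32() issues an I/O write barrier before the store;
	 * rte_write32_relaxed() skips it and leaves ordering to the caller.
	 */
	rte_write32(val, (volatile uint8_t *)bar + off);
}
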
diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c
index c089b068..9dca8da1 100644
--- a/drivers/net/cxgbe/base/t4_hw.c
+++ b/drivers/net/cxgbe/base/t4_hw.c
@@ -359,6 +359,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox,
struct mbox_entry entry;
u32 pcie_fw = 0;
+ if (!temp)
+ return -ENOMEM;
+
if ((size & 15) || size > MBOX_LEN) {
free(temp);
return -EINVAL;
diff --git a/drivers/net/cxgbe/cxgbe_compat.h b/drivers/net/cxgbe/cxgbe_compat.h
index e68f8f59..1551cbf5 100644
--- a/drivers/net/cxgbe/cxgbe_compat.h
+++ b/drivers/net/cxgbe/cxgbe_compat.h
@@ -45,6 +45,7 @@
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
+#include <rte_io.h>
#define dev_printf(level, fmt, args...) \
RTE_LOG(level, PMD, "rte_cxgbe_pmd: " fmt, ## args)
@@ -254,7 +255,7 @@ static inline unsigned long ilog2(unsigned long n)
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
- *(volatile unsigned int *)addr = val;
+ rte_write32(val, addr);
}
static inline void writeq(u64 val, volatile void __iomem *addr)
@@ -263,4 +264,9 @@ static inline void writeq(u64 val, volatile void __iomem *addr)
writel(val >> 32, (void *)((uintptr_t)addr + 4));
}
+static inline void writel_relaxed(unsigned int val, volatile void __iomem *addr)
+{
+ rte_write32_relaxed(val, addr);
+}
+
#endif /* _CXGBE_COMPAT_H_ */
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index b7f28ebb..34fed84a 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -57,6 +57,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
@@ -68,7 +69,7 @@
* Macros needed to support the PCI Device ID Table ...
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static struct rte_pci_id cxgb4_pci_tbl[] = {
+ static const struct rte_pci_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4
#define PCI_VENDOR_ID_CHELSIO 0x1425
@@ -147,6 +148,8 @@ static void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
.nb_align = 1,
};
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
device_info->max_rx_queues = max_queues;
@@ -1005,7 +1008,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
@@ -1037,16 +1040,25 @@ out_free_adapter:
return err;
}
-static struct eth_driver rte_cxgbe_pmd = {
- .pci_drv = {
- .id_table = cxgb4_pci_tbl,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_cxgbe_dev_init,
- .dev_private_size = sizeof(struct port_info),
+static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct port_info), eth_cxgbe_dev_init);
+}
+
+static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_cxgbe_pmd = {
+ .id_table = cxgb4_pci_tbl,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_cxgbe_pci_probe,
+ .remove = eth_cxgbe_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
+RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 345f9b03..1f230cd5 100644
--- a/drivers/net/cxgbe/cxgbe_main.c
+++ b/drivers/net/cxgbe/cxgbe_main.c
@@ -57,6 +57,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
@@ -1163,16 +1164,13 @@ int cxgbe_probe(struct adapter *adapter)
pi->eth_dev->data = data;
allocate_mac:
- pi->eth_dev->pci_dev = adapter->pdev;
+ pi->eth_dev->device = &adapter->pdev->device;
pi->eth_dev->data->dev_private = pi;
- pi->eth_dev->driver = adapter->eth_dev->driver;
pi->eth_dev->dev_ops = adapter->eth_dev->dev_ops;
pi->eth_dev->tx_pkt_burst = adapter->eth_dev->tx_pkt_burst;
pi->eth_dev->rx_pkt_burst = adapter->eth_dev->rx_pkt_burst;
- rte_eth_copy_pci_info(pi->eth_dev, pi->eth_dev->pci_dev);
-
- TAILQ_INIT(&pi->eth_dev->link_intr_cbs);
+ rte_eth_copy_pci_info(pi->eth_dev, adapter->pdev);
pi->eth_dev->data->mac_addrs = rte_zmalloc(name,
ETHER_ADDR_LEN, 0);
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 736f08ce..2f9e12c9 100644
--- a/drivers/net/cxgbe/sge.c
+++ b/drivers/net/cxgbe/sge.c
@@ -338,12 +338,12 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
* mechanism.
*/
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- val | V_QID(q->cntxt_id));
+ t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
+ val | V_QID(q->cntxt_id));
} else {
- writel(val | V_QID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_KDOORBELL));
+ writel_relaxed(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
/*
* This Write memory Barrier will force the write to
@@ -890,15 +890,11 @@ static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits, hw_cidx = ntohs(q->stat->cidx);
- int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits);
+ int credits;
/* use coal WR type 1 when no frags are present */
type = (mbuf->nb_segs == 1) ? 1 : 0;
- if (in_use < 0)
- in_use += q->size;
-
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
@@ -1645,7 +1641,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->size = cxgbe_roundup(iq->size, 16);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
fwevtq ? "fwq_ring" : "rx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1698,7 +1694,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
fl->size = cxgbe_roundup(fl->size, 8);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
fwevtq ? "fwq_ring" : "fl_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
@@ -1894,7 +1890,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.driver.name, "tx_ring",
+ eth_dev->data->drv_name, "tx_ring",
eth_dev->data->port_id, queue_id);
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
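
The ring_fl_db() hunk above can switch the doorbell stores to the relaxed accessors because an explicit write memory barrier already orders the descriptor updates. A sketch of that common pattern, with illustrative names rather than the driver's:

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_io.h>

/* Make the descriptor writes globally visible first, then notify the
 * hardware with the cheaper relaxed store; no second barrier is needed.
 */
static inline void sketch_ring_doorbell(volatile void *db_reg, uint32_t val)
{
	rte_wmb();                        /* flush descriptor writes */
	rte_write32_relaxed(val, db_reg); /* doorbell store, unordered */
}
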
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
new file mode 100644
index 00000000..ce65542a
--- /dev/null
+++ b/drivers/net/dpaa2/Makefile
@@ -0,0 +1,70 @@
+# BSD LICENSE
+#
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright (c) 2016 NXP. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa2.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2
+CFLAGS += -I$(RTE_SDK)/drivers/net/dpaa2/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
+CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa2_version.map
+
+# library version
+LIBABIVER := 1
+
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += base/dpaa2_hw_dpni.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += dpaa2_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_PMD) += mc/dpni.c
+
+LDLIBS += -lrte_bus_fslmc
+LDLIBS += -lrte_mempool_dpaa2
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
new file mode 100644
index 00000000..3dc60ccc
--- /dev/null
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -0,0 +1,344 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "../dpaa2_ethdev.h"
+
+static void
+dpaa2_distset_to_dpkg_profile_cfg(
+ uint32_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg);
+
+int
+dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+ uint32_t req_dist_set)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpkg_profile_cfg kg_cfg;
+ void *p_params;
+ int ret, tc_index = 0;
+
+ p_params = rte_malloc(
+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ if (!p_params) {
+		RTE_LOG(ERR, PMD, "Memory unavailable\n");
+ return -ENOMEM;
+ }
+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+ dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.dist_size = eth_dev->data->nb_rx_queues;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+
+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+ rte_free(p_params);
+ return ret;
+ }
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+ &tc_cfg);
+ rte_free(p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
+ " err code: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int dpaa2_remove_flow_dist(
+ struct rte_eth_dev *eth_dev,
+ uint8_t tc_index)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_rx_tc_dist_cfg tc_cfg;
+ struct dpkg_profile_cfg kg_cfg;
+ void *p_params;
+ int ret;
+
+ p_params = rte_malloc(
+ NULL, DIST_PARAM_IOVA_SIZE, RTE_CACHE_LINE_SIZE);
+ if (!p_params) {
+		RTE_LOG(ERR, PMD, "Memory unavailable\n");
+ return -ENOMEM;
+ }
+ memset(p_params, 0, DIST_PARAM_IOVA_SIZE);
+ memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
+
+ tc_cfg.key_cfg_iova = (uint64_t)(DPAA2_VADDR_TO_IOVA(p_params));
+ tc_cfg.dist_size = 0;
+ tc_cfg.dist_mode = DPNI_DIST_MODE_NONE;
+
+ ret = dpni_prepare_key_cfg(&kg_cfg, p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Unable to prepare extract parameters\n");
+ rte_free(p_params);
+ return ret;
+ }
+
+ ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token, tc_index,
+ &tc_cfg);
+ rte_free(p_params);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Setting distribution for Rx failed with"
+ " err code: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void
+dpaa2_distset_to_dpkg_profile_cfg(
+ uint32_t req_dist_set,
+ struct dpkg_profile_cfg *kg_cfg)
+{
+ uint32_t loop = 0, i = 0, dist_field = 0;
+ int l2_configured = 0, l3_configured = 0;
+ int l4_configured = 0, sctp_configured = 0;
+
+ memset(kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
+ while (req_dist_set) {
+ if (req_dist_set % 2 != 0) {
+ dist_field = 1U << loop;
+ switch (dist_field) {
+ case ETH_RSS_L2_PAYLOAD:
+
+ if (l2_configured)
+ break;
+ l2_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_ETH;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_ETH_TYPE;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ case ETH_RSS_IPV4:
+ case ETH_RSS_FRAG_IPV4:
+ case ETH_RSS_NONFRAG_IPV4_OTHER:
+ case ETH_RSS_IPV6:
+ case ETH_RSS_FRAG_IPV6:
+ case ETH_RSS_NONFRAG_IPV6_OTHER:
+ case ETH_RSS_IPV6_EX:
+
+ if (l3_configured)
+ break;
+ l3_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_IP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_IP_PROTO;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ kg_cfg->num_extracts++;
+ i++;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_TCP:
+ case ETH_RSS_NONFRAG_IPV6_TCP:
+ case ETH_RSS_NONFRAG_IPV4_UDP:
+ case ETH_RSS_NONFRAG_IPV6_UDP:
+ case ETH_RSS_IPV6_TCP_EX:
+ case ETH_RSS_IPV6_UDP_EX:
+
+ if (l4_configured)
+ break;
+ l4_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_TCP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_TCP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_TCP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+					NH_FLD_TCP_PORT_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_SCTP:
+ case ETH_RSS_NONFRAG_IPV6_SCTP:
+
+ if (sctp_configured)
+ break;
+ sctp_configured = 1;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_SCTP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_SCTP_PORT_SRC;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+
+ kg_cfg->extracts[i].extract.from_hdr.prot =
+ NET_PROT_SCTP;
+ kg_cfg->extracts[i].extract.from_hdr.field =
+ NH_FLD_SCTP_PORT_DST;
+ kg_cfg->extracts[i].type =
+ DPKG_EXTRACT_FROM_HDR;
+ kg_cfg->extracts[i].extract.from_hdr.type =
+ DPKG_FULL_FIELD;
+ i++;
+ break;
+
+ default:
+ PMD_DRV_LOG(WARNING, "Bad flow distribution"
+ " option %x\n", dist_field);
+ }
+ }
+ req_dist_set = req_dist_set >> 1;
+ loop++;
+ }
+ kg_cfg->num_extracts = i;
+}
+
+int
+dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv,
+ void *blist)
+{
+ /* Function to attach a DPNI with a buffer pool list. Buffer pool list
+ * handle is passed in blist.
+ */
+ int32_t retcode;
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_pools_cfg bpool_cfg;
+ struct dpaa2_bp_list *bp_list = (struct dpaa2_bp_list *)blist;
+ struct dpni_buffer_layout layout;
+ int tot_size;
+
+	/* Rx buffer layout: check the buffer layout alignment first */
+ tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
+
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
+
+ layout.data_head_room =
+ tot_size - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+ retcode = dpni_set_buffer_layout(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, &layout);
+ if (retcode) {
+ PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout\n",
+ retcode);
+ return retcode;
+ }
+
+	/* Attach the buffer pool to the network interface as requested by the user */
+ bpool_cfg.num_dpbp = 1;
+ bpool_cfg.pools[0].dpbp_id = bp_list->buf_pool.dpbp_node->dpbp_id;
+ bpool_cfg.pools[0].backup_pool = 0;
+ bpool_cfg.pools[0].buffer_size =
+ RTE_ALIGN_CEIL(bp_list->buf_pool.size,
+ 256 /*DPAA2_PACKET_LAYOUT_ALIGN*/);
+
+ retcode = dpni_set_pools(dpni, CMD_PRI_LOW, priv->token, &bpool_cfg);
+ if (retcode != 0) {
+ PMD_INIT_LOG(ERR, "Error in attaching the buffer pool list"
+ " bpid = %d Error code = %d\n",
+ bpool_cfg.pools[0].dpbp_id, retcode);
+ return retcode;
+ }
+
+ priv->bp_list = bp_list;
+ return 0;
+}
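
dpaa2_distset_to_dpkg_profile_cfg() above scans the requested ETH_RSS_* mask one bit at a time and appends one or more header extracts per matched field. A stripped-down sketch of that bit walk, where the handler is only a placeholder and not the PMD's extract setup:

#include <stdint.h>
#include <stdio.h>

/* Walk a feature bitmask least-significant bit first, as the PMD does
 * with req_dist_set, and hand every set bit to a handler.
 */
static void sketch_walk_dist_set(uint32_t req_dist_set)
{
	uint32_t loop = 0;

	while (req_dist_set) {
		if (req_dist_set & 1) {
			uint32_t dist_field = 1U << loop;

			/* placeholder for "add an extract for this field" */
			printf("configure extract for bit 0x%x\n", dist_field);
		}
		req_dist_set >>= 1;
		loop++;
	}
}
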
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
new file mode 100644
index 00000000..9324c6a3
--- /dev/null
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni_annot.h
@@ -0,0 +1,257 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * DPNI packet parse results - implementation internal
+ */
+
+#ifndef _DPAA2_HW_DPNI_ANNOT_H_
+#define _DPAA2_HW_DPNI_ANNOT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Annotation valid bits in FD FRC */
+#define DPAA2_FD_FRC_FASV 0x8000
+#define DPAA2_FD_FRC_FAEADV 0x4000
+#define DPAA2_FD_FRC_FAPRV 0x2000
+#define DPAA2_FD_FRC_FAIADV 0x1000
+#define DPAA2_FD_FRC_FASWOV 0x0800
+#define DPAA2_FD_FRC_FAICFDV 0x0400
+
+/* Annotation bits in FD CTRL */
+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */
+#define DPAA2_FD_CTRL_PTA 0x00800000
+#define DPAA2_FD_CTRL_PTV1 0x00400000
+
+/* Frame annotation status */
+struct dpaa2_fas {
+ uint8_t reserved;
+ uint8_t ppid;
+ __le16 ifpid;
+ __le32 status;
+} __packed;
+
+/**
+ * HW Packet Annotation Register structures
+ */
+struct dpaa2_annot_hdr {
+ /**< word1: Frame Annotation Status (8 bytes)*/
+ uint64_t word1;
+
+ /**< word2: Time Stamp (8 bytes)*/
+ uint64_t word2;
+
+ /**< word3: Next Hdr + FAF Extension + FAF (2 + 2 + 4 bytes)*/
+ uint64_t word3;
+
+ /**< word4: Frame Annotation Flags-FAF (8 bytes) */
+ uint64_t word4;
+
+ /**< word5:
+ * ShimOffset_1 + ShimOffset_2 + IPPIDOffset + EthOffset +
+ * LLC+SNAPOffset + VLANTCIOffset_1 + VLANTCIOffset_n +
+ * LastETypeOffset (1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+ */
+ uint64_t word5;
+
+ /**< word6:
+ * PPPoEOffset + MPLSOffset_1 + MPLSOffset_n + ARPorIPOffset_1
+ * + IPOffset_norMInEncapO + GREOffset + L4Offset +
+ * GTPorESPorIPSecOffset(1 + 1 + 1 + 1 + 1 + 1 + 1 + 1 bytes)
+ */
+ uint64_t word6;
+
+ /**< word7:
+ * RoutingHdrOfset1 + RoutingHdrOfset2 + NxtHdrOffset
+ * + IPv6FragOffset + GrossRunningSum
+ * + RunningSum(1 + 1 + 1 + 1 + 2 + 2 bytes)
+ */
+ uint64_t word7;
+
+ /**< word8:
+ * ParseErrorcode + Soft Parsing Context (1 + 7 bytes)
+ */
+ uint64_t word8;
+};
+
+/**
+ * Internal Macros to get/set Packet annotation header
+ */
+
+/** General Macro to define a particular bit position*/
+#define BIT_POS(x) ((uint64_t)1 << ((x)))
+/** Set a bit in the variable */
+#define BIT_SET_AT_POS(var, pos) ((var) |= (pos))
+/** Reset the bit in the variable */
+#define BIT_RESET_AT_POS(var, pos) ((var) &= ~(pos))
+/** Check the bit is set in the variable */
+#define BIT_ISSET_AT_POS(var, pos) (((var) & (pos)) ? 1 : 0)
+/**
+ * Macros to define bit positions in word3
+ */
+#define NEXT_HDR(var) ((uint64_t)(var) & 0xFFFF000000000000)
+#define FAF_EXTN_IPV6_ROUTE_HDR_PRESENT(var) BIT_POS(16)
+#define FAF_EXTN_RESERVED(var) ((uint64_t)(var) & 0x00007FFF00000000)
+#define FAF_USER_DEFINED_RESERVED(var) ((uint64_t)(var) & 0x00000000FF000000)
+#define SHIM_SHELL_SOFT_PARSING_ERRROR BIT_POS(23)
+#define PARSING_ERROR BIT_POS(22)
+#define L2_ETH_MAC_PRESENT BIT_POS(21)
+#define L2_ETH_MAC_UNICAST BIT_POS(20)
+#define L2_ETH_MAC_MULTICAST BIT_POS(19)
+#define L2_ETH_MAC_BROADCAST BIT_POS(18)
+#define L2_ETH_FRAME_IS_BPDU BIT_POS(17)
+#define L2_ETH_FCOE_PRESENT BIT_POS(16)
+#define L2_ETH_FIP_PRESENT BIT_POS(15)
+#define L2_ETH_PARSING_ERROR BIT_POS(14)
+#define L2_LLC_SNAP_PRESENT BIT_POS(13)
+#define L2_UNKNOWN_LLC_OUI BIT_POS(12)
+#define L2_LLC_SNAP_ERROR BIT_POS(11)
+#define L2_VLAN_1_PRESENT BIT_POS(10)
+#define L2_VLAN_N_PRESENT BIT_POS(9)
+#define L2_VLAN_CFI_BIT_PRESENT BIT_POS(8)
+#define L2_VLAN_PARSING_ERROR BIT_POS(7)
+#define L2_PPPOE_PPP_PRESENT BIT_POS(6)
+#define L2_PPPOE_PPP_PARSING_ERROR BIT_POS(5)
+#define L2_MPLS_1_PRESENT BIT_POS(4)
+#define L2_MPLS_N_PRESENT BIT_POS(3)
+#define L2_MPLS_PARSING_ERROR BIT_POS(2)
+#define L2_ARP_PRESENT BIT_POS(1)
+#define L2_ARP_PARSING_ERROR BIT_POS(0)
+/**
+ * Macros to define bit positions in word4
+ */
+#define L2_UNKNOWN_PROTOCOL BIT_POS(63)
+#define L2_SOFT_PARSING_ERROR BIT_POS(62)
+#define L3_IPV4_1_PRESENT BIT_POS(61)
+#define L3_IPV4_1_UNICAST BIT_POS(60)
+#define L3_IPV4_1_MULTICAST BIT_POS(59)
+#define L3_IPV4_1_BROADCAST BIT_POS(58)
+#define L3_IPV4_N_PRESENT BIT_POS(57)
+#define L3_IPV4_N_UNICAST BIT_POS(56)
+#define L3_IPV4_N_MULTICAST BIT_POS(55)
+#define L3_IPV4_N_BROADCAST BIT_POS(54)
+#define L3_IPV6_1_PRESENT BIT_POS(53)
+#define L3_IPV6_1_UNICAST BIT_POS(52)
+#define L3_IPV6_1_MULTICAST BIT_POS(51)
+#define L3_IPV6_N_PRESENT BIT_POS(50)
+#define L3_IPV6_N_UNICAST BIT_POS(49)
+#define L3_IPV6_N_MULTICAST BIT_POS(48)
+#define L3_IP_1_OPT_PRESENT BIT_POS(47)
+#define L3_IP_1_UNKNOWN_PROTOCOL BIT_POS(46)
+#define L3_IP_1_MORE_FRAGMENT BIT_POS(45)
+#define L3_IP_1_FIRST_FRAGMENT BIT_POS(44)
+#define L3_IP_1_PARSING_ERROR BIT_POS(43)
+#define L3_IP_N_OPT_PRESENT BIT_POS(42)
+#define L3_IP_N_UNKNOWN_PROTOCOL BIT_POS(41)
+#define L3_IP_N_MORE_FRAGMENT BIT_POS(40)
+#define L3_IP_N_FIRST_FRAGMENT BIT_POS(39)
+#define L3_PROTO_ICMP_PRESENT BIT_POS(38)
+#define L3_PROTO_IGMP_PRESENT BIT_POS(37)
+#define L3_PROTO_ICMPV6_PRESENT BIT_POS(36)
+#define L3_PROTO_UDP_LIGHT_PRESENT BIT_POS(35)
+#define L3_IP_N_PARSING_ERROR BIT_POS(34)
+#define L3_MIN_ENCAP_PRESENT BIT_POS(33)
+#define L3_MIN_ENCAP_SBIT_PRESENT BIT_POS(32)
+#define L3_MIN_ENCAP_PARSING_ERROR BIT_POS(31)
+#define L3_PROTO_GRE_PRESENT BIT_POS(30)
+#define L3_PROTO_GRE_RBIT_PRESENT BIT_POS(29)
+#define L3_PROTO_GRE_PARSING_ERROR BIT_POS(28)
+#define L3_IP_UNKNOWN_PROTOCOL BIT_POS(27)
+#define L3_SOFT_PARSING_ERROR BIT_POS(26)
+#define L3_PROTO_UDP_PRESENT BIT_POS(25)
+#define L3_PROTO_UDP_PARSING_ERROR BIT_POS(24)
+#define L3_PROTO_TCP_PRESENT BIT_POS(23)
+#define L3_PROTO_TCP_OPT_PRESENT BIT_POS(22)
+#define L3_PROTO_TCP_CTRL_BIT_6_TO_11_PRESENT BIT_POS(21)
+#define L3_PROTO_TCP_CTRL_BIT_3_TO_5_PRESENT BIT_POS(20)
+#define L3_PROTO_TCP_PARSING_ERROR BIT_POS(19)
+#define L3_PROTO_IPSEC_PRESENT BIT_POS(18)
+#define L3_PROTO_IPSEC_ESP_PRESENT BIT_POS(17)
+#define L3_PROTO_IPSEC_AH_PRESENT BIT_POS(16)
+#define L3_PROTO_IPSEC_PARSING_ERROR BIT_POS(15)
+#define L3_PROTO_SCTP_PRESENT BIT_POS(14)
+#define L3_PROTO_SCTP_PARSING_ERROR BIT_POS(13)
+#define L3_PROTO_DCCP_PRESENT BIT_POS(12)
+#define L3_PROTO_DCCP_PARSING_ERROR BIT_POS(11)
+#define L4_UNKNOWN_PROTOCOL BIT_POS(10)
+#define L4_SOFT_PARSING_ERROR BIT_POS(9)
+#define L3_PROTO_GTP_PRESENT BIT_POS(8)
+#define L3_PROTO_GTP_PARSING_ERROR BIT_POS(7)
+#define L3_PROTO_ESP_PRESENT BIT_POS(6)
+#define L3_PROTO_ESP_PARSING_ERROR BIT_POS(5)
+#define L3_PROTO_ISCSI_PRESENT BIT_POS(4)
+#define L3_PROTO_CAPWAN__CTRL_PRESENT BIT_POS(3)
+#define L3_PROTO_CAPWAN__DATA_PRESENT BIT_POS(2)
+#define L5_SOFT_PARSING_ERROR BIT_POS(1)
+#define L3_IPV6_ROUTE_HDR_PRESENT BIT_POS(0)
+
+/* Debug frame, otherwise supposed to be discarded */
+#define DPAA2_ETH_FAS_DISC 0x80000000
+/* MACSEC frame */
+#define DPAA2_ETH_FAS_MS 0x40000000
+#define DPAA2_ETH_FAS_PTP 0x08000000
+/* Ethernet multicast frame */
+#define DPAA2_ETH_FAS_MC 0x04000000
+/* Ethernet broadcast frame */
+#define DPAA2_ETH_FAS_BC 0x02000000
+#define DPAA2_ETH_FAS_KSE 0x00040000
+#define DPAA2_ETH_FAS_EOFHE 0x00020000
+#define DPAA2_ETH_FAS_MNLE 0x00010000
+#define DPAA2_ETH_FAS_TIDE 0x00008000
+#define DPAA2_ETH_FAS_PIEE 0x00004000
+/* Frame length error */
+#define DPAA2_ETH_FAS_FLE 0x00002000
+/* Frame physical error */
+#define DPAA2_ETH_FAS_FPE 0x00001000
+#define DPAA2_ETH_FAS_PTE 0x00000080
+#define DPAA2_ETH_FAS_ISP 0x00000040
+#define DPAA2_ETH_FAS_PHE 0x00000020
+#define DPAA2_ETH_FAS_BLE 0x00000010
+/* L3 csum validation performed */
+#define DPAA2_ETH_FAS_L3CV 0x00000008
+/* L3 csum error */
+#define DPAA2_ETH_FAS_L3CE 0x00000004
+/* L4 csum validation performed */
+#define DPAA2_ETH_FAS_L4CV 0x00000002
+/* L4 csum error */
+#define DPAA2_ETH_FAS_L4CE 0x00000001
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
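
The BIT_POS()/BIT_ISSET_AT_POS() helpers above reduce every parse result to a mask test on one of the 64-bit annotation words. A self-contained sketch using two of the word4 flags; the word4 value below is made up for illustration, real frames carry it in the hardware annotation area:

#include <stdint.h>
#include <stdio.h>

#define BIT_POS(x)			((uint64_t)1 << ((x)))
#define BIT_ISSET_AT_POS(var, pos)	(((var) & (pos)) ? 1 : 0)
#define L3_IPV4_1_PRESENT		BIT_POS(61)
#define L3_PROTO_UDP_PRESENT		BIT_POS(25)

int main(void)
{
	/* illustrative word4 value only */
	uint64_t word4 = L3_IPV4_1_PRESENT | L3_PROTO_UDP_PRESENT;

	if (BIT_ISSET_AT_POS(word4, L3_IPV4_1_PRESENT))
		printf("IPv4 header present\n");
	if (BIT_ISSET_AT_POS(word4, L3_PROTO_UDP_PRESENT))
		printf("UDP header present\n");
	return 0;
}
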
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
new file mode 100644
index 00000000..45764420
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -0,0 +1,1035 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_fslmc.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_mempool.h>
+#include <dpaa2_hw_dpio.h>
+
+#include "dpaa2_ethdev.h"
+
+static struct rte_dpaa2_driver rte_dpaa2_pmd;
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to read from.
+ * @param link
+ *   Pointer to the buffer in which the link status is saved.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+dpaa2_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = link;
+ struct rte_eth_link *src = &dev->data->dev_link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   Pointer to the structure rte_eth_dev to write to.
+ * @param link
+ *   Pointer to the link status to be written into the device data.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+dpaa2_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ dev_info->if_index = priv->hw_id;
+
+ dev_info->max_mac_addrs = priv->max_mac_filters;
+ dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN;
+ dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
+ dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
+ dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G |
+ ETH_LINK_SPEED_2_5G |
+ ETH_LINK_SPEED_10G;
+}
+
+static int
+dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ uint16_t dist_idx;
+ uint32_t vq_id;
+ struct dpaa2_queue *mc_q, *mcq;
+ uint32_t tot_queues;
+ int i;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ tot_queues = priv->nb_rx_queues + priv->nb_tx_queues;
+ mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues,
+ RTE_CACHE_LINE_SIZE);
+ if (!mc_q) {
+ PMD_INIT_LOG(ERR, "malloc failed for rx/tx queues\n");
+ return -1;
+ }
+
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ mc_q->dev = dev;
+ priv->rx_vq[i] = mc_q++;
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_q->q_storage = rte_malloc("dq_storage",
+ sizeof(struct queue_storage_info_t),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa2_q->q_storage)
+ goto fail;
+
+ memset(dpaa2_q->q_storage, 0,
+ sizeof(struct queue_storage_info_t));
+ if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage))
+ goto fail;
+ }
+
+ for (i = 0; i < priv->nb_tx_queues; i++) {
+ mc_q->dev = dev;
+ mc_q->flow_id = DPNI_NEW_FLOW_ID;
+ priv->tx_vq[i] = mc_q++;
+ }
+
+ vq_id = 0;
+ for (dist_idx = 0; dist_idx < priv->num_dist_per_tc[DPAA2_DEF_TC];
+ dist_idx++) {
+ mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id];
+ mcq->tc_index = DPAA2_DEF_TC;
+ mcq->flow_id = dist_idx;
+ vq_id++;
+ }
+
+ return 0;
+fail:
+ i -= 1;
+ mc_q = priv->rx_vq[0];
+ while (i >= 0) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ dpaa2_free_dq_storage(dpaa2_q->q_storage);
+ rte_free(dpaa2_q->q_storage);
+ priv->rx_vq[i--] = NULL;
+ }
+ rte_free(mc_q);
+ return -1;
+}
+
+static int
+dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Check for correct configuration */
+ if (eth_conf->rxmode.mq_mode != ETH_MQ_RX_RSS &&
+ data->nb_rx_queues > 1) {
+ PMD_INIT_LOG(ERR, "Distribution is not enabled, "
+ "but Rx queues more than 1\n");
+ return -1;
+ }
+
+ if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ /* Return in case number of Rx queues is 1 */
+ if (data->nb_rx_queues == 1)
+ return 0;
+ ret = dpaa2_setup_flow_dist(dev,
+ eth_conf->rx_adv_conf.rss_conf.rss_hf);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "unable to set flow distribution."
+ "please check queue config\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/* Function to setup RX flow information. It contains traffic class ID,
+ * flow ID, destination configuration etc.
+ */
+static int
+dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct dpaa2_queue *dpaa2_q;
+ struct dpni_queue cfg;
+ uint8_t options = 0;
+ uint8_t flow_id;
+ uint32_t bpid;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, pool = %p, conf =%p",
+ dev, rx_queue_id, mb_pool, rx_conf);
+
+ if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+ bpid = mempool_to_bpid(mb_pool);
+ ret = dpaa2_attach_bp_list(priv,
+ rte_dpaa2_bpid_info[bpid].bp_list);
+ if (ret)
+ return ret;
+ }
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+ dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */
+
+	/* Get the tc id and flow id from the given VQ id */
+ flow_id = rx_queue_id % priv->num_dist_per_tc[dpaa2_q->tc_index];
+ memset(&cfg, 0, sizeof(struct dpni_queue));
+
+ options = options | DPNI_QUEUE_OPT_USER_CTX;
+ cfg.user_context = (uint64_t)(dpaa2_q);
+
+ /*if ls2088 or rev2 device, enable the stashing */
+ if ((qbman_get_version() & 0xFFFF0000) > QMAN_REV_4000) {
+ options |= DPNI_QUEUE_OPT_FLC;
+ cfg.flc.stash_control = true;
+ cfg.flc.value &= 0xFFFFFFFFFFFFFFC0;
+ /* 00 00 00 - last 6 bit represent annotation, context stashing,
+ * data stashing setting 01 01 00 (0x14) to enable
+ * 1 line annotation, 1 line context
+ */
+ cfg.flc.value |= 0x14;
+ }
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_RX,
+ dpaa2_q->tc_index, flow_id, options, &cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in setting the rx flow: = %d\n", ret);
+ return -1;
+ }
+
+ dev->data->rx_queues[rx_queue_id] = dpaa2_q;
+ return 0;
+}
+
+static int
+dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)
+ priv->tx_vq[tx_queue_id];
+ struct fsl_mc_io *dpni = priv->hw;
+ struct dpni_queue tx_conf_cfg;
+ struct dpni_queue tx_flow_cfg;
+ uint8_t options = 0, flow_id;
+ uint32_t tc_id;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Return if queue already configured */
+ if (dpaa2_q->flow_id != DPNI_NEW_FLOW_ID)
+ return 0;
+
+ memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
+ memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
+
+ if (priv->num_tc == 1) {
+ tc_id = 0;
+ flow_id = tx_queue_id % priv->num_dist_per_tc[tc_id];
+ } else {
+ tc_id = tx_queue_id;
+ flow_id = 0;
+ }
+
+ ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+ tc_id, flow_id, options, &tx_flow_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in setting the tx flow: "
+ "tc_id=%d, flow =%d ErrorCode = %x\n",
+ tc_id, flow_id, -ret);
+ return -1;
+ }
+
+ dpaa2_q->flow_id = flow_id;
+
+ if (tx_queue_id == 0) {
+ /*Set tx-conf and error configuration*/
+ ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW,
+ priv->token,
+ DPNI_CONF_DISABLE);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error in set tx conf mode settings"
+ " ErrorCode = %x", ret);
+ return -1;
+ }
+ }
+ dpaa2_q->tc_index = tc_id;
+
+ dev->data->tx_queues[tx_queue_id] = dpaa2_q;
+ return 0;
+}
+
+static void
+dpaa2_dev_rx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+dpaa2_dev_tx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static const uint32_t *
+dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+		/* TODO: add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP,
+ RTE_PTYPE_L4_ICMP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ if (dev->rx_pkt_burst == dpaa2_dev_rx)
+ return ptypes;
+ return NULL;
+}
+
+static int
+dpaa2_dev_start(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct dpaa2_dev_priv *priv = data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct dpni_queue cfg;
+ struct dpni_error_cfg err_cfg;
+ uint16_t qdid;
+ struct dpni_queue_id qid;
+ struct dpaa2_queue *dpaa2_q;
+ int ret, i;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure %d in enabling dpni %d device\n",
+ ret, priv->hw_id);
+ return ret;
+ }
+
+ ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, &qdid);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get qdid:ErrorCode = %d\n", ret);
+ return ret;
+ }
+ priv->qdid = qdid;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
+ ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, dpaa2_q->tc_index,
+ dpaa2_q->flow_id, &cfg, &qid);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get flow "
+ "information Error code = %d\n", ret);
+ return ret;
+ }
+ dpaa2_q->fqid = qid.fqid;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L3_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to set RX l3 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_RX_L4_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get RX l4 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L3_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to set TX l3 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+ ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+ DPNI_OFF_TX_L4_CSUM, true);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to get TX l4 csum:Error = %d\n", ret);
+ return ret;
+ }
+
+	/* On checksum errors, continue on the normal path and flag the error in the annotation */
+ err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
+
+ err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
+ err_cfg.set_frame_annotation = true;
+
+ ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW,
+ priv->token, &err_cfg);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error to dpni_set_errors_behavior:"
+ "code = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC.
+ */
+static void
+dpaa2_dev_stop(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+ struct rte_eth_link link;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = dpni_disable(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure (ret %d) in disabling dpni %d dev\n",
+ ret, priv->hw_id);
+ return;
+ }
+
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ dpaa2_dev_atomic_write_link_status(dev, &link);
+}
+
+static void
+dpaa2_dev_close(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Clean the device first */
+ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure cleaning dpni device with"
+ " error code %d\n", ret);
+ return;
+ }
+}
+
+static void
+dpaa2_dev_promiscuous_enable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to enable promiscuous mode %d", ret);
+}
+
+static void
+dpaa2_dev_promiscuous_disable(
+ struct rte_eth_dev *dev)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false);
+ if (ret < 0)
+ RTE_LOG(ERR, PMD, "Unable to disable promiscuous mode %d", ret);
+}
+
+static int
+dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return -EINVAL;
+ }
+
+ /* check that mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN))
+ return -EINVAL;
+
+ /* Set the Max Rx frame length as 'mtu' +
+ * Maximum Ethernet header length
+ */
+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+ mtu + ETH_VLAN_HLEN);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "setting the max frame length failed");
+ return -1;
+ }
+ PMD_DRV_LOG(INFO, "MTU is configured %d for the device\n", mtu);
+ return 0;
+}
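
dpaa2_dev_mtu_set() above only accepts an MTU whose resulting frame fits the device's maximum Rx frame length; the check is plain Ethernet overhead arithmetic. A worked sketch with the standard header and CRC sizes, where the maximum is a stand-in rather than the PMD's DPAA2_MAX_RX_PKT_LEN:

#include <stdint.h>
#include <stdio.h>

#define ETHER_MIN_MTU	68	/* standard minimum MTU */
#define ETHER_HDR_LEN	14	/* dst mac + src mac + ethertype */
#define ETHER_CRC_LEN	4
#define MAX_RX_PKT_LEN	10240	/* illustrative stand-in only */

static int mtu_ok(uint16_t mtu)
{
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	return mtu >= ETHER_MIN_MTU && frame_size <= MAX_RX_PKT_LEN;
}

int main(void)
{
	printf("mtu 1500  -> %s\n", mtu_ok(1500) ? "ok" : "rejected");
	printf("mtu 10300 -> %s\n", mtu_ok(10300) ? "ok" : "rejected");
	return 0;
}
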
+
+static
+void dpaa2_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+ uint8_t page0 = 0, page1 = 1, page2 = 2;
+ union dpni_statistics value;
+
+ memset(&value, 0, sizeof(union dpni_statistics));
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!dpni) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ if (!stats) {
+ RTE_LOG(ERR, PMD, "stats is NULL");
+ return;
+ }
+
+ /*Get Counters from page_0*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page0, &value);
+ if (retcode)
+ goto err;
+
+ stats->ipackets = value.page_0.ingress_all_frames;
+ stats->ibytes = value.page_0.ingress_all_bytes;
+
+ /*Get Counters from page_1*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page1, &value);
+ if (retcode)
+ goto err;
+
+ stats->opackets = value.page_1.egress_all_frames;
+ stats->obytes = value.page_1.egress_all_bytes;
+
+ /*Get Counters from page_2*/
+ retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token,
+ page2, &value);
+ if (retcode)
+ goto err;
+
+ stats->ierrors = value.page_2.ingress_discarded_frames;
+ stats->oerrors = value.page_2.egress_discarded_frames;
+ stats->imissed = value.page_2.ingress_nobuffer_discards;
+
+ return;
+
+err:
+ RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ return;
+};
+
+static
+void dpaa2_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int32_t retcode;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "dpni is NULL");
+ return;
+ }
+
+ retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token);
+ if (retcode)
+ goto error;
+
+ return;
+
+error:
+ RTE_LOG(ERR, PMD, "Operation not completed:Error Code = %d\n", retcode);
+ return;
+};
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+dpaa2_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ int ret;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ struct rte_eth_link link, old;
+ struct dpni_link_state state = {0};
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpni == NULL) {
+ RTE_LOG(ERR, PMD, "error : dpni is NULL");
+ return 0;
+ }
+ memset(&old, 0, sizeof(old));
+ dpaa2_dev_atomic_read_link_status(dev, &old);
+
+ ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state);
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD, "error: dpni_get_link_state %d", ret);
+ return -1;
+ }
+
+ if ((old.link_status == state.up) && (old.link_speed == state.rate)) {
+ RTE_LOG(DEBUG, PMD, "No change in status\n");
+ return -1;
+ }
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_status = state.up;
+ link.link_speed = state.rate;
+
+ if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ else
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+ dpaa2_dev_atomic_write_link_status(dev, &link);
+
+ if (link.link_status)
+ PMD_DRV_LOG(INFO, "Port %d Link is Up\n", dev->data->port_id);
+ else
+ PMD_DRV_LOG(INFO, "Port %d Link is Down\n", dev->data->port_id);
+ return 0;
+}
+
+static struct eth_dev_ops dpaa2_ethdev_ops = {
+ .dev_configure = dpaa2_eth_dev_configure,
+ .dev_start = dpaa2_dev_start,
+ .dev_stop = dpaa2_dev_stop,
+ .dev_close = dpaa2_dev_close,
+ .promiscuous_enable = dpaa2_dev_promiscuous_enable,
+ .promiscuous_disable = dpaa2_dev_promiscuous_disable,
+ .link_update = dpaa2_dev_link_update,
+ .stats_get = dpaa2_dev_stats_get,
+ .stats_reset = dpaa2_dev_stats_reset,
+ .dev_infos_get = dpaa2_dev_info_get,
+ .dev_supported_ptypes_get = dpaa2_supported_ptypes_get,
+ .mtu_set = dpaa2_dev_mtu_set,
+ .rx_queue_setup = dpaa2_dev_rx_queue_setup,
+ .rx_queue_release = dpaa2_dev_rx_queue_release,
+ .tx_queue_setup = dpaa2_dev_tx_queue_setup,
+ .tx_queue_release = dpaa2_dev_tx_queue_release,
+};
+
+static int
+dpaa2_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_device *dev = eth_dev->device;
+ struct rte_dpaa2_device *dpaa2_dev;
+ struct fsl_mc_io *dpni_dev;
+ struct dpni_attr attr;
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct dpni_buffer_layout layout;
+ int i, ret, hw_id;
+ int tot_size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
+
+ hw_id = dpaa2_dev->object_id;
+
+ dpni_dev = (struct fsl_mc_io *)malloc(sizeof(struct fsl_mc_io));
+ if (!dpni_dev) {
+ PMD_INIT_LOG(ERR, "malloc failed for dpni device\n");
+ return -1;
+ }
+
+ dpni_dev->regs = rte_mcp_ptr_list[0];
+ ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in opening dpni@%d device with"
+ " error code %d\n", hw_id, ret);
+ return -1;
+ }
+
+ /* Clean the device first */
+ ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure cleaning dpni@%d device with"
+ " error code %d\n", hw_id, ret);
+ return -1;
+ }
+
+ ret = dpni_get_attributes(dpni_dev, CMD_PRI_LOW, priv->token, &attr);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure in getting dpni@%d attribute, "
+ " error code %d\n", hw_id, ret);
+ return -1;
+ }
+
+ priv->num_tc = attr.num_tcs;
+ for (i = 0; i < attr.num_tcs; i++) {
+ priv->num_dist_per_tc[i] = attr.num_queues;
+ break;
+ }
+
+ /* Distribution is per Tc only,
+ * so choosing RX queues from default TC only
+ */
+ priv->nb_rx_queues = priv->num_dist_per_tc[DPAA2_DEF_TC];
+
+ if (attr.num_tcs == 1)
+ priv->nb_tx_queues = attr.num_queues;
+ else
+ priv->nb_tx_queues = attr.num_tcs;
+
+ PMD_INIT_LOG(DEBUG, "num_tc %d", priv->num_tc);
+ PMD_INIT_LOG(DEBUG, "nb_rx_queues %d", priv->nb_rx_queues);
+
+ priv->hw = dpni_dev;
+ priv->hw_id = hw_id;
+ priv->options = attr.options;
+ priv->max_mac_filters = attr.mac_filter_entries;
+ priv->max_vlan_filters = attr.vlan_filter_entries;
+ priv->flags = 0;
+
+ /* Allocate memory for hardware structure for queues */
+ ret = dpaa2_alloc_rx_tx_queues(eth_dev);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "dpaa2_alloc_rx_tx_queuesFailed\n");
+ return -ret;
+ }
+
+ /* Allocate memory for storing MAC addresses */
+ eth_dev->data->mac_addrs = rte_zmalloc("dpni",
+ ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+ "store MAC addresses",
+ ETHER_ADDR_LEN * attr.mac_filter_entries);
+ return -ENOMEM;
+ }
+
+ ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
+ priv->token,
+ (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes));
+ if (ret) {
+ PMD_INIT_LOG(ERR, "DPNI get mac address failed:"
+ " Error Code = %d\n", ret);
+ return -ret;
+ }
+
+ /* ... rx buffer layout ... */
+ tot_size = DPAA2_HW_BUF_RESERVE + RTE_PKTMBUF_HEADROOM;
+ tot_size = RTE_ALIGN_CEIL(tot_size,
+ DPAA2_PACKET_LAYOUT_ALIGN);
+
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
+ DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
+
+ layout.pass_frame_status = 1;
+ layout.data_head_room = tot_size
+ - DPAA2_FD_PTA_SIZE - DPAA2_MBUF_HW_ANNOTATION;
+ layout.private_data_size = DPAA2_FD_PTA_SIZE;
+ layout.pass_parser_result = 1;
+ PMD_INIT_LOG(DEBUG, "Tot_size = %d, head room = %d, private = %d",
+ tot_size, layout.data_head_room, layout.private_data_size);
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_RX, &layout);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Err(%d) in setting rx buffer layout", ret);
+ return -1;
+ }
+
+ /* ... tx buffer layout ... */
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ layout.pass_frame_status = 1;
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX, &layout);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx buffer"
+ " layout", ret);
+ return -1;
+ }
+
+ /* ... tx-conf and error buffer layout ... */
+ memset(&layout, 0, sizeof(struct dpni_buffer_layout));
+ layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+ layout.pass_frame_status = 1;
+ ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
+ DPNI_QUEUE_TX_CONFIRM, &layout);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Error (%d) in setting tx-conf buffer"
+ " layout", ret);
+ return -1;
+ }
+
+ eth_dev->dev_ops = &dpaa2_ethdev_ops;
+ eth_dev->data->drv_name = rte_dpaa2_pmd.driver.name;
+
+ eth_dev->rx_pkt_burst = dpaa2_dev_rx;
+ eth_dev->tx_pkt_burst = dpaa2_dev_tx;
+ rte_fslmc_vfio_dmamap();
+
+ return 0;
+}
+
+static int
+dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
+ struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
+ int i, ret;
+ struct dpaa2_queue *dpaa2_q;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ if (!dpni) {
+ PMD_INIT_LOG(WARNING, "Already closed or not started");
+ return -1;
+ }
+
+ dpaa2_dev_close(eth_dev);
+
+ if (priv->rx_vq[0]) {
+ /* cleaning up queue storage */
+ for (i = 0; i < priv->nb_rx_queues; i++) {
+ dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
+ if (dpaa2_q->q_storage)
+ rte_free(dpaa2_q->q_storage);
+ }
+ /* free all the queue memory */
+ rte_free(priv->rx_vq[0]);
+ priv->rx_vq[0] = NULL;
+ }
+
+ /* Free the memory allocated for MAC addresses */
+ if (eth_dev->data->mac_addrs) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ }
+
+ /* Close the device at the underlying layer */
+ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failure closing dpni device with"
+ " error code %d\n", ret);
+ }
+
+ /* Free the memory allocated for the Ethernet private data and dpni */
+ priv->hw = NULL;
+ free(dpni);
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
+ struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ char ethdev_name[RTE_ETH_NAME_MAX_LEN];
+
+ int diag;
+
+ sprintf(ethdev_name, "dpni-%d", dpaa2_dev->object_id);
+
+ eth_dev = rte_eth_dev_allocate(ethdev_name);
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev->data->dev_private = rte_zmalloc(
+ "ethdev private structure",
+ sizeof(struct dpaa2_dev_priv),
+ RTE_CACHE_LINE_SIZE);
+ if (eth_dev->data->dev_private == NULL) {
+ PMD_INIT_LOG(CRIT, "Cannot allocate memzone for"
+ " private port data\n");
+ rte_eth_dev_release_port(eth_dev);
+ return -ENOMEM;
+ }
+ }
+ eth_dev->device = &dpaa2_dev->device;
+ dpaa2_dev->eth_dev = eth_dev;
+ eth_dev->data->rx_mbuf_alloc_failed = 0;
+
+ /* Invoke PMD device initialization function */
+ diag = dpaa2_dev_init(eth_dev);
+ if (diag == 0)
+ return 0;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+ return diag;
+}
+
+static int
+rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
+{
+ struct rte_eth_dev *eth_dev;
+
+ eth_dev = dpaa2_dev->eth_dev;
+ dpaa2_dev_uninit(eth_dev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(eth_dev->data->dev_private);
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_dpaa2_driver rte_dpaa2_pmd = {
+ .drv_type = DPAA2_MC_DPNI_DEVID,
+ .probe = rte_dpaa2_probe,
+ .remove = rte_dpaa2_remove,
+};
+
+RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.h b/drivers/net/dpaa2/dpaa2_ethdev.h
new file mode 100644
index 00000000..7196398f
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_ethdev.h
@@ -0,0 +1,83 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA2_ETHDEV_H
+#define _DPAA2_ETHDEV_H
+
+#include <mc/fsl_dpni.h>
+#include <mc/fsl_mc_sys.h>
+
+#define DPAA2_MIN_RX_BUF_SIZE 512
+#define DPAA2_MAX_RX_PKT_LEN 10240 /*WRIOP support*/
+
+#define MAX_TCS DPNI_MAX_TC
+#define MAX_RX_QUEUES 16
+#define MAX_TX_QUEUES 16
+
+/* Default TC to be used for congestion, distribution etc. configuration */
+#define DPAA2_DEF_TC 0
+
+/* Size of the input SMMU mapped memory required by MC */
+#define DIST_PARAM_IOVA_SIZE 256
+
+struct dpaa2_dev_priv {
+ void *hw;
+ int32_t hw_id;
+ int32_t qdid;
+ uint16_t token;
+ uint8_t nb_tx_queues;
+ uint8_t nb_rx_queues;
+ void *rx_vq[MAX_RX_QUEUES];
+ void *tx_vq[MAX_TX_QUEUES];
+
+ struct dpaa2_bp_list *bp_list; /**< Attached buffer pool list */
+ uint32_t options;
+ uint16_t num_dist_per_tc[MAX_TCS];
+ uint8_t max_mac_filters;
+ uint8_t max_vlan_filters;
+ uint8_t num_tc;
+ uint8_t flags; /*dpaa2 config flags */
+};
+
+int dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
+ uint32_t req_dist_set);
+
+int dpaa2_remove_flow_dist(struct rte_eth_dev *eth_dev,
+ uint8_t tc_index);
+
+int dpaa2_attach_bp_list(struct dpaa2_dev_priv *priv, void *blist);
+
+uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+uint16_t dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+
+#endif /* _DPAA2_ETHDEV_H */
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
new file mode 100644
index 00000000..c5d49cbe
--- /dev/null
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -0,0 +1,422 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (c) 2016 NXP. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <time.h>
+#include <net/if.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <fslmc_vfio.h>
+#include <dpaa2_hw_pvt.h>
+#include <dpaa2_hw_dpio.h>
+#include <dpaa2_hw_mempool.h>
+
+#include "dpaa2_ethdev.h"
+#include "base/dpaa2_hw_dpni_annot.h"
+
+static inline uint32_t __attribute__((hot))
+dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
+{
+ uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
+ struct dpaa2_annot_hdr *annotation =
+ (struct dpaa2_annot_hdr *)hw_annot_addr;
+
+ PMD_RX_LOG(DEBUG, "annotation = 0x%lx ", annotation->word4);
+
+ if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
+ pkt_type = RTE_PTYPE_L2_ETHER_ARP;
+ goto parse_done;
+ } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
+ pkt_type = RTE_PTYPE_L2_ETHER;
+ } else {
+ goto parse_done;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
+ L3_IPV4_N_PRESENT)) {
+ pkt_type |= RTE_PTYPE_L3_IPV4;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
+ L3_IP_N_OPT_PRESENT))
+ pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
+
+ } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
+ L3_IPV6_N_PRESENT)) {
+ pkt_type |= RTE_PTYPE_L3_IPV6;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
+ L3_IP_N_OPT_PRESENT))
+ pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
+ } else {
+ goto parse_done;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
+ L3_IP_1_MORE_FRAGMENT |
+ L3_IP_N_FIRST_FRAGMENT |
+ L3_IP_N_MORE_FRAGMENT)) {
+ pkt_type |= RTE_PTYPE_L4_FRAG;
+ goto parse_done;
+ } else {
+ pkt_type |= RTE_PTYPE_L4_NONFRAG;
+ }
+
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_UDP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_TCP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_SCTP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
+ pkt_type |= RTE_PTYPE_L4_ICMP;
+
+ else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
+ pkt_type |= RTE_PTYPE_UNKNOWN;
+
+parse_done:
+ return pkt_type;
+}
+
+static inline void __attribute__((hot))
+dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
+{
+ struct dpaa2_annot_hdr *annotation =
+ (struct dpaa2_annot_hdr *)hw_annot_addr;
+
+ if (BIT_ISSET_AT_POS(annotation->word3,
+ L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+
+ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+}
+
+static inline struct rte_mbuf *__attribute__((hot))
+eth_fd_to_mbuf(const struct qbman_fd *fd)
+{
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ /* need to repopulate some of the fields,
+ * as they may have changed in the last transmission
+ */
+ mbuf->nb_segs = 1;
+ mbuf->ol_flags = 0;
+ mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
+ mbuf->data_len = DPAA2_GET_FD_LEN(fd);
+ mbuf->pkt_len = mbuf->data_len;
+
+ /* Parse the packet */
+ /* parse results are after the private - sw annotation area */
+ mbuf->packet_type = dpaa2_dev_rx_parse(
+ (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+ + DPAA2_FD_PTA_SIZE);
+
+ dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)) +
+ DPAA2_FD_PTA_SIZE, mbuf);
+
+ mbuf->next = NULL;
+ rte_mbuf_refcnt_set(mbuf, 1);
+
+ PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ mbuf, mbuf->buf_addr, mbuf->data_off,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
+
+ return mbuf;
+}
+
+static void __attribute__ ((noinline)) __attribute__((hot))
+eth_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ /*Resetting the buffer pool id and offset field*/
+ fd->simple.bpid_offset = 0;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
+ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+
+ PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
+ "fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
+ mbuf, mbuf->buf_addr, mbuf->data_off,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
+}
+
+
+static inline int __attribute__((hot))
+eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_mbuf *m;
+ void *mb = NULL;
+
+ if (rte_dpaa2_mbuf_alloc_bulk(
+ rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
+ PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+ m = (struct rte_mbuf *)mb;
+ memcpy((char *)m->buf_addr + mbuf->data_off,
+ (void *)((char *)mbuf->buf_addr + mbuf->data_off),
+ mbuf->pkt_len);
+
+ /* Copy required fields */
+ m->data_off = mbuf->data_off;
+ m->ol_flags = mbuf->ol_flags;
+ m->packet_type = mbuf->packet_type;
+ m->tx_offload = mbuf->tx_offload;
+
+ /*Resetting the buffer pool id and offset field*/
+ fd->simple.bpid_offset = 0;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
+ DPAA2_SET_FD_LEN(fd, mbuf->data_len);
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
+ DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
+
+ PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
+ (void *)mbuf, mbuf->buf_addr);
+
+ PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
+ /*free the original packet */
+ rte_pktmbuf_free(mbuf);
+
+ return 0;
+}
+
+uint16_t
+dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function receives frames for a given device and VQ */
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_rx = 0;
+ uint8_t is_last = 0, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ struct rte_eth_dev *dev = dpaa2_q->dev;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+ dq_storage = dpaa2_q->q_storage->dq_storage[0];
+
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_numframes(&pulldesc,
+ (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+ DPAA2_DQRR_RING_SIZE : nb_pkts);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ /* todo optimization - we can have dq_storage_phys available*/
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ /*Issue a volatile dequeue command. */
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ PMD_RX_LOG(ERR, "VDQ command is not issued."
+ " QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ /* Receive the packets till the Last Dequeue entry is found with
+ * respect to the above issued PULL command.
+ */
+ while (!is_last) {
+ struct rte_mbuf *mbuf;
+ /* Check if the previously issued command is completed.
+ * Also, the SWP seems to be shared between the
+ * Ethernet driver and the SEC driver.
+ */
+ while (!qbman_check_command_complete(swp, dq_storage))
+ ;
+ /* Loop until the dq_storage is updated with
+ * new token by QBMAN
+ */
+ while (!qbman_result_has_new_result(swp, dq_storage))
+ ;
+ /* Check whether Last Pull command is Expired and
+ * setting Condition for Loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ is_last = 1;
+ /* Check for valid frame. */
+ status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+
+ fd = qbman_result_DQ_fd(dq_storage);
+ mbuf = (struct rte_mbuf *)DPAA2_IOVA_TO_VADDR(
+ DPAA2_GET_FD_ADDR(fd)
+ - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ /* Prefetch mbuf */
+ rte_prefetch0(mbuf);
+ /* Prefetch Annotation address for the parse results */
+ rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd)
+ + DPAA2_FD_PTA_SIZE + 16));
+
+ bufs[num_rx] = eth_fd_to_mbuf(fd);
+ bufs[num_rx]->port = dev->data->port_id;
+
+ num_rx++;
+ dq_storage++;
+ } /* End of Packet Rx loop */
+
+ dpaa2_q->rx_pkts += num_rx;
+
+ /*Return the total number of packets received to DPAA2 app*/
+ return num_rx;
+}
+
+/*
+ * Callback to handle sending packets through WRIOP based interface
+ */
+uint16_t
+dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function transmits the frames to the given device and VQ */
+ uint32_t loop;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc;
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_swp *swp;
+ uint16_t num_tx = 0;
+ uint16_t bpid;
+ struct rte_eth_dev *dev = dpaa2_q->dev;
+ struct dpaa2_dev_priv *priv = dev->data->dev_private;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);
+
+ /*Prepare enqueue descriptor*/
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+ qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
+ dpaa2_q->flow_id, dpaa2_q->tc_index);
+
+ /*Clear the unused FD fields before sending*/
+ while (nb_pkts) {
+ frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ fd_arr[loop].simple.frc = 0;
+ DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
+ DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
+ mp = (*bufs)->pool;
+ /* Not a hw_pkt pool allocated frame */
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ PMD_TX_LOG(ERR, "non hw offload buffer");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ if (priv->bp_list) {
+ bpid = priv->bp_list->buf_pool.bpid;
+ } else {
+ PMD_TX_LOG(ERR, "error: no buffer pool"
+ " attached");
+ num_tx = 0;
+ goto skip_tx;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ bufs++;
+ continue;
+ }
+ } else {
+ bpid = mempool_to_bpid(mp);
+ eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
+ }
+ bufs++;
+ }
+ loop = 0;
+ while (loop < frames_to_send) {
+ loop += qbman_swp_send_multiple(swp, &eqdesc,
+ &fd_arr[loop], frames_to_send - loop);
+ }
+
+ num_tx += frames_to_send;
+ dpaa2_q->tx_pkts += frames_to_send;
+ nb_pkts -= frames_to_send;
+ }
+skip_tx:
+ return num_tx;
+}
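+
+/*
+ * Illustrative application-side usage (not part of this patch): once the
+ * port is configured and started, the Rx/Tx callbacks above are reached
+ * through the generic ethdev burst API; 'port_id' is an assumed, already
+ * started port in this sketch.
+ *
+ *	struct rte_mbuf *pkts[32];
+ *	uint16_t nb;
+ *
+ *	nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
+ *	if (nb)
+ *		rte_eth_tx_burst(port_id, 0, pkts, nb);
+ */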
diff --git a/drivers/net/dpaa2/mc/dpni.c b/drivers/net/dpaa2/mc/dpni.c
new file mode 100644
index 00000000..33306140
--- /dev/null
+++ b/drivers/net/dpaa2/mc/dpni.c
@@ -0,0 +1,739 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <fsl_mc_sys.h>
+#include <fsl_mc_cmd.h>
+#include <fsl_dpni.h>
+#include <fsl_dpni_cmd.h>
+
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ uint8_t *key_cfg_buf)
+{
+ int i, j;
+ int offset = 0;
+ int param = 1;
+ uint64_t *params = (uint64_t *)key_cfg_buf;
+
+ if (!key_cfg_buf || !cfg)
+ return -EINVAL;
+
+ params[0] |= mc_enc(0, 8, cfg->num_extracts);
+ params[0] = cpu_to_le64(params[0]);
+
+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS)
+ return -EINVAL;
+
+ for (i = 0; i < cfg->num_extracts; i++) {
+ switch (cfg->extracts[i].type) {
+ case DPKG_EXTRACT_FROM_HDR:
+ params[param] |= mc_enc(0, 8,
+ cfg->extracts[i].extract.from_hdr.prot);
+ params[param] |= mc_enc(8, 4,
+ cfg->extracts[i].extract.from_hdr.type);
+ params[param] |= mc_enc(16, 8,
+ cfg->extracts[i].extract.from_hdr.size);
+ params[param] |= mc_enc(24, 8,
+ cfg->extracts[i].extract.
+ from_hdr.offset);
+ params[param] |= mc_enc(32, 32,
+ cfg->extracts[i].extract.
+ from_hdr.field);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ params[param] |= mc_enc(0, 8,
+ cfg->extracts[i].extract.
+ from_hdr.hdr_index);
+ break;
+ case DPKG_EXTRACT_FROM_DATA:
+ params[param] |= mc_enc(16, 8,
+ cfg->extracts[i].extract.
+ from_data.size);
+ params[param] |= mc_enc(24, 8,
+ cfg->extracts[i].extract.
+ from_data.offset);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ break;
+ case DPKG_EXTRACT_FROM_PARSE:
+ params[param] |= mc_enc(16, 8,
+ cfg->extracts[i].extract.
+ from_parse.size);
+ params[param] |= mc_enc(24, 8,
+ cfg->extracts[i].extract.
+ from_parse.offset);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ params[param] |= mc_enc(
+ 24, 8, cfg->extracts[i].num_of_byte_masks);
+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type);
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ for (offset = 0, j = 0;
+ j < DPKG_NUM_OF_MASKS;
+ offset += 16, j++) {
+ params[param] |= mc_enc(
+ (offset), 8, cfg->extracts[i].masks[j].mask);
+ params[param] |= mc_enc(
+ (offset + 8), 8,
+ cfg->extracts[i].masks[j].offset);
+ }
+ params[param] = cpu_to_le64(params[param]);
+ param++;
+ }
+ return 0;
+}
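+
+/*
+ * Illustrative usage (not part of this patch): the prepared buffer is
+ * typically handed to the MC through an Rx traffic-class distribution
+ * command. The dpni_rx_tc_dist_cfg field names (dist_size, dist_mode,
+ * key_cfg_iova) and DPNI_DIST_MODE_HASH are assumed from the declarations
+ * in fsl_dpni.h; the IOVA conversion of the buffer is omitted for brevity.
+ *
+ *	uint8_t key_buf[256] __attribute__((aligned(64)));
+ *	struct dpni_rx_tc_dist_cfg tc_cfg;
+ *
+ *	dpni_prepare_key_cfg(&kg_cfg, key_buf);
+ *	memset(&tc_cfg, 0, sizeof(tc_cfg));
+ *	tc_cfg.dist_size = nb_rx_queues;
+ *	tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
+ *	tc_cfg.key_cfg_iova = (uint64_t)key_buf;
+ *	dpni_set_rx_tc_dist(mc_io, CMD_PRI_LOW, token, 0, &tc_cfg);
+ */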
+
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ DPNI_CMD_OPEN(cmd, dpni_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+
+ return 0;
+}
+
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ DPNI_CMD_CREATE(cmd, cfg);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+
+ return 0;
+}
+
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ /* set object id to destroy */
+ CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_POOLS(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_IS_ENABLED(cmd, *en);
+
+ return 0;
+}
+
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_ATTR(cmd, attr);
+
+ return 0;
+}
+
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_BUFFER_LAYOUT(cmd, qtype);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_BUFFER_LAYOUT(cmd, layout);
+
+ return 0;
+}
+
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_BUFFER_LAYOUT,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_BUFFER_LAYOUT(cmd, qtype, layout);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t config)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_OFFLOAD,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_OFFLOAD(cmd, type, config);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t *config)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_OFFLOAD,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_OFFLOAD(cmd, type);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_OFFLOAD(cmd, *config);
+
+ return 0;
+}
+
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_QDID(cmd, qtype);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_QDID(cmd, *qdid);
+
+ return 0;
+}
+
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_LINK_STATE(cmd, state);
+
+ return 0;
+}
+
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length);
+
+ return 0;
+}
+
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en);
+
+ return 0;
+}
+
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6])
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr);
+
+ return 0;
+}
+
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONFIRMATION_MODE,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ DPNI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+
+ return 0;
+}
+
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ const struct dpni_queue *queue)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QUEUE,
+ cmd_flags,
+ token);
+ DPNI_CMD_SET_QUEUE(cmd, qtype, tc, index, options, queue);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QUEUE,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_QUEUE(cmd, qtype, tc, index);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_QUEUE(cmd, queue, qid);
+
+ return 0;
+}
+
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t page,
+ union dpni_statistics *stat)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_STATISTICS,
+ cmd_flags,
+ token);
+ DPNI_CMD_GET_STATISTICS(cmd, page);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ DPNI_RSP_GET_STATISTICS(cmd, stat);
+
+ return 0;
+}
+
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET_STATISTICS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/dpaa2/mc/fsl_dpkg.h b/drivers/net/dpaa2/mc/fsl_dpkg.h
new file mode 100644
index 00000000..3e0f4b0e
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_dpkg.h
@@ -0,0 +1,184 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPKG_H_
+#define __FSL_DPKG_H_
+
+#include <fsl_net.h>
+
+/* Data Path Key Generator API
+ * Contains initialization APIs and runtime APIs for the Key Generator
+ */
+
+/** Key Generator properties */
+
+/**
+ * Number of masks per key extraction
+ */
+#define DPKG_NUM_OF_MASKS 4
+/**
+ * Number of extractions per key profile
+ */
+#define DPKG_MAX_NUM_OF_EXTRACTS 10
+
+/**
+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types
+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset
+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field
+ * @DPKG_FULL_FIELD: Extract a full field
+ */
+enum dpkg_extract_from_hdr_type {
+ DPKG_FROM_HDR = 0,
+ DPKG_FROM_FIELD = 1,
+ DPKG_FULL_FIELD = 2
+};
+
+/**
+ * enum dpkg_extract_type - Enumeration for selecting extraction type
+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header
+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header
+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result;
+ * e.g. can be used to extract header existence;
+ * please refer to 'Parse Result definition' section in the parser BG
+ */
+enum dpkg_extract_type {
+ DPKG_EXTRACT_FROM_HDR = 0,
+ DPKG_EXTRACT_FROM_DATA = 1,
+ DPKG_EXTRACT_FROM_PARSE = 3
+};
+
+/**
+ * struct dpkg_mask - A structure for defining a single extraction mask
+ * @mask: Byte mask for the extracted content
+ * @offset: Offset within the extracted content
+ */
+struct dpkg_mask {
+ uint8_t mask;
+ uint8_t offset;
+};
+
+/**
+ * struct dpkg_extract - A structure for defining a single extraction
+ * @type: Determines how the union below is interpreted:
+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr';
+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data';
+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse'
+ * @extract: Selects extraction method
+ * @num_of_byte_masks: Defines the number of valid entries in the array below;
+ * This is also the number of bytes to be used as masks
+ * @masks: Masks parameters
+ */
+struct dpkg_extract {
+ enum dpkg_extract_type type;
+ /**
+ * union extract - Selects extraction method
+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ */
+ union {
+ /**
+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR'
+ * @prot: Any of the supported headers
+ * @type: Defines the type of header extraction:
+ * DPKG_FROM_HDR: use size & offset below;
+ * DPKG_FROM_FIELD: use field, size and offset below;
+ * DPKG_FULL_FIELD: use field below
+ * @field: One of the supported fields (NH_FLD_)
+ *
+ * @size: Size in bytes
+ * @offset: Byte offset
+ * @hdr_index: Clear for cases not listed below;
+ * Used for protocols that may have more than a single
+ * header, 0 indicates an outer header;
+ * Supported protocols (possible values):
+ * NET_PROT_VLAN (0, HDR_INDEX_LAST);
+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST);
+ * NET_PROT_IP(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv4(0, HDR_INDEX_LAST);
+ * NET_PROT_IPv6(0, HDR_INDEX_LAST);
+ */
+
+ struct {
+ enum net_prot prot;
+ enum dpkg_extract_from_hdr_type type;
+ uint32_t field;
+ uint8_t size;
+ uint8_t offset;
+ uint8_t hdr_index;
+ } from_hdr;
+ /**
+ * struct from_data
+ * Used when 'type = DPKG_EXTRACT_FROM_DATA'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_data;
+
+ /**
+ * struct from_parse
+ * Used when 'type = DPKG_EXTRACT_FROM_PARSE'
+ * @size: Size in bytes
+ * @offset: Byte offset
+ */
+ struct {
+ uint8_t size;
+ uint8_t offset;
+ } from_parse;
+ } extract;
+
+ uint8_t num_of_byte_masks;
+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS];
+};
+
+/**
+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation
+ * profile (rule)
+ * @num_extracts: Defines the number of valid entries in the array below
+ * @extracts: Array of required extractions
+ */
+struct dpkg_profile_cfg {
+ uint8_t num_extracts;
+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS];
+};
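+
+/*
+ * Illustrative usage (not part of this patch): a sketch of a key profile
+ * that distributes on the IPv4 source and destination addresses. The
+ * NET_PROT_IP and NH_FLD_IP_* identifiers are assumed to be provided by
+ * fsl_net.h.
+ *
+ *	struct dpkg_profile_cfg kg_cfg;
+ *
+ *	memset(&kg_cfg, 0, sizeof(kg_cfg));
+ *	kg_cfg.num_extracts = 2;
+ *	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
+ *	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_IP;
+ *	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_IP_SRC;
+ *	kg_cfg.extracts[1].type = DPKG_EXTRACT_FROM_HDR;
+ *	kg_cfg.extracts[1].extract.from_hdr.prot = NET_PROT_IP;
+ *	kg_cfg.extracts[1].extract.from_hdr.type = DPKG_FULL_FIELD;
+ *	kg_cfg.extracts[1].extract.from_hdr.field = NH_FLD_IP_DST;
+ */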
+
+#endif /* __FSL_DPKG_H_ */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni.h b/drivers/net/dpaa2/mc/fsl_dpni.h
new file mode 100644
index 00000000..ef14f858
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_dpni.h
@@ -0,0 +1,1217 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPNI_H
+#define __FSL_DPNI_H
+
+#include <fsl_dpkg.h>
+
+struct fsl_mc_io;
+
+/**
+ * Data Path Network Interface API
+ * Contains initialization APIs and runtime control APIs for DPNI
+ */
+
+/** General DPNI macros */
+
+/**
+ * Maximum number of traffic classes
+ */
+#define DPNI_MAX_TC 8
+/**
+ * Maximum number of buffer pools per DPNI
+ */
+#define DPNI_MAX_DPBP 8
+/**
+ * Maximum number of storage-profiles per DPNI
+ */
+#define DPNI_MAX_SP 2
+
+/**
+ * All traffic classes considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TCS (uint8_t)(-1)
+/**
+ * All flows within traffic class considered; see dpni_set_queue()
+ */
+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1)
+/**
+ * Generate new flow ID; see dpni_set_queue()
+ */
+#define DPNI_NEW_FLOW_ID (uint16_t)(-1)
+/**
+ * Tx traffic is always released to a buffer pool on transmit, there are no
+ * resources allocated to have the frames confirmed back to the source after
+ * transmission.
+ */
+#define DPNI_OPT_TX_FRM_RELEASE 0x000001
+/**
+ * Disables support for MAC address filtering for addresses other than primary
+ * MAC address. This affects both unicast and multicast. Promiscuous mode can
+ * still be enabled/disabled for both unicast and multicast. If promiscuous mode
+ * is disabled, only traffic matching the primary MAC address will be accepted.
+ */
+#define DPNI_OPT_NO_MAC_FILTER 0x000002
+/**
+ * Allocate policers for this DPNI. They can be used to rate-limit traffic per
+ * traffic class (TC) basis.
+ */
+#define DPNI_OPT_HAS_POLICING 0x000004
+/**
+ * Congestion can be managed in several ways, allowing the buffer pool to
+ * deplete on ingress, taildrop on each queue or use congestion groups for sets
+ * of queues. If set, it configures a single congestion groups across all TCs.
+ * If reset, a congestion group is allocated for each TC. Only relevant if the
+ * DPNI has multiple traffic classes.
+ */
+#define DPNI_OPT_SHARED_CONGESTION 0x000008
+/**
+ * Enables TCAM for Flow Steering and QoS look-ups. If not specified, all
+ * look-ups are exact match. Note that TCAM is not available on LS1088 and its
+ * variants. Setting this bit on these SoCs will trigger an error.
+ */
+#define DPNI_OPT_HAS_KEY_MASKING 0x000010
+/**
+ * Disables the flow steering table.
+ */
+#define DPNI_OPT_NO_FS 0x000020
+
+/**
+ * dpni_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpni_id: DPNI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpni_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpni_id,
+ uint16_t *token);
+
+/**
+ * dpni_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
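+
+/*
+ * Illustrative usage (not part of this patch): a minimal open/close session
+ * sketch, assuming 'mc_io' is an affined MC portal and 'dpni_id' is a DPNI
+ * object id discovered on the fslmc bus.
+ *
+ *	uint16_t token;
+ *
+ *	if (!dpni_open(mc_io, CMD_PRI_LOW, dpni_id, &token)) {
+ *		... issue further DPNI commands using 'token' ...
+ *		dpni_close(mc_io, CMD_PRI_LOW, token);
+ *	}
+ */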
+
+/**
+ * struct dpni_cfg - Structure representing DPNI configuration
+ *
+ * Default is all zeros; use this structure to change the default settings
+ * at object creation. The individual fields are documented inline below.
+ */
+struct dpni_cfg {
+ /**
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @fs_entries: Number of entries in the flow steering table.
+ * This table is used to select the ingress queue for
+ * ingress traffic, targeting a GPP core or another.
+ * In addition it can be used to discard traffic that
+ * matches the set rule. It is either an exact match table
+ * or a TCAM table, depending on the DPNI_OPT_HAS_KEY_MASKING
+ * bit in OPTIONS field. This field is ignored if
+ * DPNI_OPT_NO_FS bit is set in OPTIONS field. Otherwise,
+ * value 0 defaults to 64. Maximum supported value is 1024.
+ * Note that the total number of entries is limited on the
+ * SoC to as low as 512 entries if TCAM is used.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table. This is an exact match table used to filter
+ * ingress traffic based on VLAN IDs. Value 0 disables VLAN
+ * filtering. Maximum supported value is 16.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table. This is an exact match table and allows both
+ * unicast and multicast entries. The primary MAC address
+ * of the network interface is not part of this table,
+ * this contains only entries in addition to it. This
+ * field is ignored if DPNI_OPT_NO_MAC_FILTER is set in
+ * OPTIONS field. Otherwise, value 0 defaults to 80.
+ * Maximum supported value is 80.
+ * @num_queues: Number of Tx and Rx queues used for traffic
+ * distribution. This is orthogonal to QoS and is only
+ * used to distribute traffic to multiple GPP cores.
+ * This configuration affects the number of Tx queues
+ * (logical FQs, all associated with a single CEETM queue),
+ * Rx queues and Tx confirmation queues, if applicable.
+ * Value 0 defaults to one queue. Maximum supported value
+ * is 8.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * TCs can have different priority levels for the purpose
+ * of Tx scheduling (see DPNI_SET_TX_SELECTION), different
+ * BPs (DPNI_SET_POOLS), policers. There are dedicated QM
+ * queues for traffic classes (including class queues on
+ * Tx). Value 0 defaults to one TC. Maximum supported value
+ * is 8.
+ * @qos_entries: Number of entries in the QoS classification table. This
+ * table is used to select the TC for ingress traffic. It
+ * is either an exact match or a TCAM table, depending on
+ * DPNI_OPT_HAS_KEY_MASKING bit in OPTIONS field. This
+ * field is ignored if the DPNI has a single TC. Otherwise,
+ * a value of 0 defaults to 64. Maximum supported value
+ * is 64.
+ */
+ uint32_t options;
+ uint16_t fs_entries;
+ uint8_t vlan_filter_entries;
+ uint8_t mac_filter_entries;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t qos_entries;
+};
+
+/**
+ * dpni_create() - Create the DPNI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPNI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpni_cfg *cfg,
+ uint32_t *obj_id);
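+
+/*
+ * Illustrative usage (not part of this patch): a sketch of creating a DPNI
+ * with one traffic class and eight distribution queues. 'mc_io' and
+ * 'dprc_token' are assumed to come from the caller; zeroed fields take the
+ * documented defaults.
+ *
+ *	struct dpni_cfg cfg;
+ *	uint32_t obj_id;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.num_queues = 8;
+ *	cfg.num_tcs = 1;
+ *	dpni_create(mc_io, dprc_token, CMD_PRI_LOW, &cfg, &obj_id);
+ */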
+
+/**
+ * dpni_destroy() - Destroy the DPNI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
+
+/**
+ * struct dpni_pools_cfg - Structure representing buffer pools configuration
+ * @num_dpbp: Number of DPBPs
+ * @pools: Array of buffer pools parameters; The number of valid entries
+ * must match 'num_dpbp' value
+ */
+struct dpni_pools_cfg {
+ uint8_t num_dpbp;
+ /**
+ * struct pools - Buffer pools parameters
+ * @dpbp_id: DPBP object ID
+ * @buffer_size: Buffer size
+ * @backup_pool: Backup pool
+ */
+ struct {
+ int dpbp_id;
+ uint16_t buffer_size;
+ int backup_pool;
+ } pools[DPNI_MAX_DPBP];
+};
+
+/**
+ * dpni_set_pools() - Set buffer pools configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Buffer pools configuration
+ *
+ * This function is mandatory for DPNI operation.
+ * Warning: allowed only when the DPNI is disabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_pools(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpni_pools_cfg *cfg);
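+
+/*
+ * Illustrative usage (not part of this patch): attaching a single DPBP as
+ * the default buffer pool. 'dpbp_id' is an assumed DPBP object id and the
+ * buffer size is assumed to match the attached mempool element size.
+ *
+ *	struct dpni_pools_cfg pools;
+ *
+ *	memset(&pools, 0, sizeof(pools));
+ *	pools.num_dpbp = 1;
+ *	pools.pools[0].dpbp_id = dpbp_id;
+ *	pools.pools[0].buffer_size = 2048;
+ *	pools.pools[0].backup_pool = 0;
+ *	dpni_set_pools(mc_io, CMD_PRI_LOW, token, &pools);
+ */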
+
+/**
+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * dpni_is_enabled() - Check if the DPNI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_is_enabled(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpni_reset() - Reset the DPNI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+/**
+ * struct dpni_attr - Structure representing DPNI attributes
+ * @options: Any combination of the following options:
+ * DPNI_OPT_TX_FRM_RELEASE
+ * DPNI_OPT_NO_MAC_FILTER
+ * DPNI_OPT_HAS_POLICING
+ * DPNI_OPT_SHARED_CONGESTION
+ * DPNI_OPT_HAS_KEY_MASKING
+ * DPNI_OPT_NO_FS
+ * @num_queues: Number of Tx and Rx queues used for traffic distribution.
+ * @num_tcs: Number of traffic classes (TCs), reserved for the DPNI.
+ * @mac_filter_entries: Number of entries in the MAC address filtering
+ * table.
+ * @vlan_filter_entries: Number of entries in the VLAN address filtering
+ * table.
+ * @qos_entries: Number of entries in the QoS classification table.
+ * @fs_entries: Number of entries in the flow steering table.
+ * @qos_key_size: Size, in bytes, of the QoS look-up key. Defining a key larger
+ * than this when adding QoS entries will result
+ * in an error.
+ * @fs_key_size: Size, in bytes, of the flow steering look-up key. Defining a
+ * key larger than this when composing the hash + FS key
+ * will result in an error.
+ * @wriop_version: Version of WRIOP HW block.
+ * The 3 version values are stored on 6, 5, 5 bits
+ * respectively.
+ * Values returned:
+ * - 0x400 - WRIOP version 1.0.0, used on LS2080 and
+ * variants,
+ * - 0x421 - WRIOP version 1.1.1, used on LS2088 and
+ * variants,
+ * - 0x422 - WRIOP version 1.1.2, used on LS1088 and
+ * variants.
+ */
+struct dpni_attr {
+ uint32_t options;
+ uint8_t num_queues;
+ uint8_t num_tcs;
+ uint8_t mac_filter_entries;
+ uint8_t vlan_filter_entries;
+ uint8_t qos_entries;
+ uint16_t fs_entries;
+ uint8_t qos_key_size;
+ uint8_t fs_key_size;
+ uint16_t wriop_version;
+};
+
+/**
+ * dpni_get_attributes() - Retrieve DPNI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @attr: Object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_attr *attr);
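For illustration only (printf and the surrounding setup are assumptions, not part
of this API), the attributes can be queried and inspected as follows:

    struct dpni_attr attr = {0};

    if (dpni_get_attributes(mc_io, 0, token, &attr) == 0)
            printf("DPNI: %u queues, %u TCs, WRIOP 0x%x\n",
                   attr.num_queues, attr.num_tcs, attr.wriop_version);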
+
+/**
+ * DPNI errors
+ */
+
+/**
+ * Extract out of frame header error
+ */
+#define DPNI_ERROR_EOFHE 0x00020000
+/**
+ * Frame length error
+ */
+#define DPNI_ERROR_FLE 0x00002000
+/**
+ * Frame physical error
+ */
+#define DPNI_ERROR_FPE 0x00001000
+/**
+ * Parsing header error
+ */
+#define DPNI_ERROR_PHE 0x00000020
+/**
+ * Parser L3 checksum error
+ */
+#define DPNI_ERROR_L3CE 0x00000004
+/**
+ * Parser L4 checksum error
+ */
+#define DPNI_ERROR_L4CE 0x00000001
+
+/**
+ * enum dpni_error_action - Defines DPNI behavior for errors
+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame
+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow
+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue
+ */
+enum dpni_error_action {
+ DPNI_ERROR_ACTION_DISCARD = 0,
+ DPNI_ERROR_ACTION_CONTINUE = 1,
+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2
+};
+
+/**
+ * struct dpni_error_cfg - Structure representing DPNI errors treatment
+ * @errors: Errors mask; use 'DPNI_ERROR_<X>' values
+ * @error_action: The desired action for the errors mask
+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation
+ * status (FAS); relevant only for the non-discard action
+ */
+struct dpni_error_cfg {
+ uint32_t errors;
+ enum dpni_error_action error_action;
+ int set_frame_annotation;
+};
+
+/**
+ * dpni_set_errors_behavior() - Set errors behavior
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @cfg: Errors configuration
+ *
+ * This function may be called numerous times with different
+ * error masks.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_error_cfg *cfg);
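One plausible configuration (a sketch, not a prescribed setting) keeps frames with
L3/L4 checksum errors but marks them in the frame annotation status:

    struct dpni_error_cfg err_cfg = {0};

    err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE;
    err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE;
    err_cfg.set_frame_annotation = 1;    /* record the error in FAS */

    dpni_set_errors_behavior(mc_io, 0, token, &err_cfg);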
+
+/**
+ * DPNI buffer layout modification options
+ */
+
+/**
+ * Select to modify the time-stamp setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001
+/**
+ * Select to modify the parser-result setting; not applicable for Tx
+ */
+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002
+/**
+ * Select to modify the frame-status setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004
+/**
+ * Select to modify the private-data-size setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008
+/**
+ * Select to modify the data-alignment setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010
+/**
+ * Select to modify the data-head-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020
+/**
+ * Select to modify the data-tail-room setting
+ */
+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040
+
+/**
+ * struct dpni_buffer_layout - Structure representing DPNI buffer layout
+ * @options: Flags representing the suggested modifications to the buffer
+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_<X>' flags
+ * @pass_timestamp: Pass timestamp value
+ * @pass_parser_result: Pass parser results
+ * @pass_frame_status: Pass frame status
+ * @private_data_size: Size kept for private data (in bytes)
+ * @data_align: Data alignment
+ * @data_head_room: Data head room
+ * @data_tail_room: Data tail room
+ */
+struct dpni_buffer_layout {
+ uint32_t options;
+ int pass_timestamp;
+ int pass_parser_result;
+ int pass_frame_status;
+ uint16_t private_data_size;
+ uint16_t data_align;
+ uint16_t data_head_room;
+ uint16_t data_tail_room;
+};
+
+/**
+ * enum dpni_queue_type - Identifies a type of queue targeted by the command
+ * @DPNI_QUEUE_RX: Rx queue
+ * @DPNI_QUEUE_TX: Tx queue
+ * @DPNI_QUEUE_TX_CONFIRM: Tx confirmation queue
+ * @DPNI_QUEUE_RX_ERR: Rx error queue
+ */
+enum dpni_queue_type {
+ DPNI_QUEUE_RX,
+ DPNI_QUEUE_TX,
+ DPNI_QUEUE_TX_CONFIRM,
+ DPNI_QUEUE_RX_ERR,
+};
+
+/**
+ * dpni_get_buffer_layout() - Retrieve buffer layout attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to get the layout from
+ * @layout: Returns buffer layout attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ struct dpni_buffer_layout *layout);
+
+/**
+ * dpni_set_buffer_layout() - Set buffer layout configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to set layout on
+ * @layout: Buffer layout configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_buffer_layout(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ const struct dpni_buffer_layout *layout);
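A sketch of an Rx layout change, performed while the DPNI is disabled; the
128-byte headroom is an arbitrary example value:

    struct dpni_buffer_layout layout = {0};

    layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
    layout.pass_frame_status = 1;
    layout.data_head_room = 128;    /* bytes reserved before the frame data */

    dpni_set_buffer_layout(mc_io, 0, token, DPNI_QUEUE_RX, &layout);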
+
+/**
+ * enum dpni_offload - Identifies a type of offload targeted by the command
+ * @DPNI_OFF_RX_L3_CSUM: Rx L3 checksum validation
+ * @DPNI_OFF_RX_L4_CSUM: Rx L4 checksum validation
+ * @DPNI_OFF_TX_L3_CSUM: Tx L3 checksum generation
+ * @DPNI_OFF_TX_L4_CSUM: Tx L4 checksum generation
+ */
+enum dpni_offload {
+ DPNI_OFF_RX_L3_CSUM,
+ DPNI_OFF_RX_L4_CSUM,
+ DPNI_OFF_TX_L3_CSUM,
+ DPNI_OFF_TX_L4_CSUM,
+};
+
+/**
+ * dpni_set_offload() - Set DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, a non-zero value enables
+ * the offload.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_set_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t config);
+
+/**
+ * dpni_get_offload() - Get DPNI offload configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @type: Type of DPNI offload
+ * @config: Offload configuration.
+ * For checksum offloads, a value of 1 indicates that the
+ * offload is enabled.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * @warning Allowed only when DPNI is disabled
+ */
+int dpni_get_offload(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_offload type,
+ uint32_t *config);
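For example, Rx L3 checksum validation could be enabled and read back like this
(again while the DPNI is disabled; error handling omitted in the sketch):

    uint32_t csum_enabled = 0;

    dpni_set_offload(mc_io, 0, token, DPNI_OFF_RX_L3_CSUM, 1);
    dpni_get_offload(mc_io, 0, token, DPNI_OFF_RX_L3_CSUM, &csum_enabled);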
+
+/**
+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used
+ * for enqueue operations
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue to get QDID for. For applications looking to
+ * transmit traffic this should be set to DPNI_QUEUE_TX
+ * @qdid: Returned virtual QDID value that should be used as an argument
+ * in all enqueue operations
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_qdid(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint16_t *qdid);
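A transmit path would typically fetch the Tx QDID once and reuse it; a sketch
under the same 'mc_io'/'token' assumptions:

    uint16_t tx_qdid = 0;

    if (dpni_get_qdid(mc_io, 0, token, DPNI_QUEUE_TX, &tx_qdid) == 0) {
            /* use 'tx_qdid' in subsequent QBMan enqueue descriptors */
    }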
+
+#define DPNI_STATISTICS_CNT 7
+
+union dpni_statistics {
+ /**
+ * struct page_0 - Page_0 statistics structure
+ * @ingress_all_frames: Ingress frame count
+ * @ingress_all_bytes: Ingress byte count
+ * @ingress_multicast_frames: Ingress multicast frame count
+ * @ingress_multicast_bytes: Ingress multicast byte count
+ * @ingress_broadcast_frames: Ingress broadcast frame count
+ * @ingress_broadcast_bytes: Ingress broadcast byte count
+ */
+ struct {
+ uint64_t ingress_all_frames;
+ uint64_t ingress_all_bytes;
+ uint64_t ingress_multicast_frames;
+ uint64_t ingress_multicast_bytes;
+ uint64_t ingress_broadcast_frames;
+ uint64_t ingress_broadcast_bytes;
+ } page_0;
+ /**
+ * struct page_1 - Page_1 statistics structure
+ * @egress_all_frames: Egress frame count
+ * @egress_all_bytes: Egress byte count
+ * @egress_multicast_frames: Egress multicast frame count
+ * @egress_multicast_bytes: Egress multicast byte count
+ * @egress_broadcast_frames: Egress broadcast frame count
+ * @egress_broadcast_bytes: Egress broadcast byte count
+ */
+ struct {
+ uint64_t egress_all_frames;
+ uint64_t egress_all_bytes;
+ uint64_t egress_multicast_frames;
+ uint64_t egress_multicast_bytes;
+ uint64_t egress_broadcast_frames;
+ uint64_t egress_broadcast_bytes;
+ } page_1;
+ /**
+ * struct page_2 - Page_2 statistics structure
+ * @ingress_filtered_frames: Ingress filtered frame count
+ * @ingress_discarded_frames: Ingress discarded frame count
+ * @ingress_nobuffer_discards: Ingress discarded frame count due to
+ * lack of buffers
+ * @egress_discarded_frames: Egress discarded frame count
+ * @egress_confirmed_frames: Egress confirmed frame count
+ */
+ struct {
+ uint64_t ingress_filtered_frames;
+ uint64_t ingress_discarded_frames;
+ uint64_t ingress_nobuffer_discards;
+ uint64_t egress_discarded_frames;
+ uint64_t egress_confirmed_frames;
+ } page_2;
+ /**
+ * struct raw - raw statistics structure, used to index counters
+ */
+ struct {
+ uint64_t counter[DPNI_STATISTICS_CNT];
+ } raw;
+};
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpni_link_state - Structure representing DPNI link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPNI_LINK_OPT_<X>' values
+ * @up: Link state; '0' for down, '1' for up
+ */
+struct dpni_link_state {
+ uint32_t rate;
+ uint64_t options;
+ int up;
+};
+
+/**
+ * dpni_get_link_state() - Return the link state (either up or down)
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @state: Returned link state;
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_link_state(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpni_link_state *state);
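A polling-style link check, assuming the usual 'mc_io'/'token' pair, could read:

    struct dpni_link_state state = {0};

    if (dpni_get_link_state(mc_io, 0, token, &state) == 0 && state.up) {
            /* link is up; state.rate gives the speed and state.options
             * the DPNI_LINK_OPT_<X> flags reported for this link */
    }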
+
+/**
+ * dpni_set_max_frame_length() - Set the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t max_frame_length);
+
+/**
+ * dpni_get_max_frame_length() - Get the maximum received frame length.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @max_frame_length: Maximum received frame length (in
+ * bytes); frame is discarded if its
+ * length exceeds this value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint16_t *max_frame_length);
+
+
+/**
+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int en);
+
+/**
+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Returns '1' if enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ int *en);
+
+/**
+ * dpni_set_primary_mac_addr() - Set the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address to set as primary address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const uint8_t mac_addr[6]);
+
+/**
+ * dpni_get_primary_mac_addr() - Get the primary MAC address
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: Returned MAC address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
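Setting and reading back the primary MAC address might look like the sketch below;
the address itself is a made-up example:

    uint8_t new_mac[6] = { 0x00, 0x04, 0x9f, 0x01, 0x02, 0x03 };
    uint8_t cur_mac[6];

    dpni_set_primary_mac_addr(mc_io, 0, token, new_mac);
    dpni_get_primary_mac_addr(mc_io, 0, token, cur_mac);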
+
+
+/**
+ * dpni_get_port_mac_addr() - Retrieve MAC address associated to the physical
+ * port the DPNI is attached to
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mac_addr: MAC address of the physical port, if any, otherwise 0
+ *
+ * The primary MAC address is not modified by this operation.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t mac_addr[6]);
+
+/**
+ * enum dpni_dist_mode - DPNI distribution mode
+ * @DPNI_DIST_MODE_NONE: No distribution
+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if
+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation
+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if
+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation
+ */
+enum dpni_dist_mode {
+ DPNI_DIST_MODE_NONE = 0,
+ DPNI_DIST_MODE_HASH = 1,
+ DPNI_DIST_MODE_FS = 2
+};
+
+/**
+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action
+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame
+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id
+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash
+ */
+enum dpni_fs_miss_action {
+ DPNI_FS_MISS_DROP = 0,
+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1,
+ DPNI_FS_MISS_HASH = 2
+};
+
+/**
+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration
+ * @miss_action: Miss action selection
+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID'
+ */
+struct dpni_fs_tbl_cfg {
+ enum dpni_fs_miss_action miss_action;
+ uint16_t default_flow_id;
+};
+
+/**
+ * dpni_prepare_key_cfg() - Prepare extract parameters
+ * @cfg: Key Generation profile (rule) defining the extractions
+ * @key_cfg_buf: Zeroed 256-byte memory buffer, to be mapped to DMA afterwards
+ *
+ * This function has to be called before the following functions:
+ * - dpni_set_rx_tc_dist()
+ * - dpni_set_qos_table()
+ */
+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg,
+ uint8_t *key_cfg_buf);
+
+/**
+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration
+ * @dist_size: Set the distribution size;
+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96,
+ * 112,128,192,224,256,384,448,512,768,896,1024
+ * @dist_mode: Distribution mode
+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
+ * the extractions to be used for the distribution key by calling
+ * dpni_prepare_key_cfg(); relevant only when
+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0'
+ * @fs_cfg: Flow Steering table configuration; only relevant if
+ * 'dist_mode = DPNI_DIST_MODE_FS'
+ */
+struct dpni_rx_tc_dist_cfg {
+ uint16_t dist_size;
+ enum dpni_dist_mode dist_mode;
+ uint64_t key_cfg_iova;
+ struct dpni_fs_tbl_cfg fs_cfg;
+};
+
+/**
+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @tc_id: Traffic class selection (0-7)
+ * @cfg: Traffic class distribution configuration
+ *
+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg()
+ * first to prepare the key_cfg_iova parameter
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t tc_id,
+ const struct dpni_rx_tc_dist_cfg *cfg);
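A hash-distribution setup over 8 queues on traffic class 0 could be sketched as
follows; 'profile' is an already-filled struct dpkg_profile_cfg, and
'key_buf'/'key_iova' refer to 256 bytes of zeroed DMA-able memory, all assumed to
be prepared elsewhere:

    struct dpni_rx_tc_dist_cfg dist_cfg = {0};

    dpni_prepare_key_cfg(&profile, key_buf);   /* serialize the extractions */

    dist_cfg.dist_size = 8;                    /* spread over 8 Rx queues */
    dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
    dist_cfg.key_cfg_iova = key_iova;

    dpni_set_rx_tc_dist(mc_io, 0, token, 0, &dist_cfg);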
+
+/**
+ * enum dpni_dest - DPNI destination types
+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
+ * does not generate FQDAN notifications; user is expected to
+ * dequeue from the queue based on polling or other user-defined
+ * method
+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue
+ * from the queue only after notification is received
+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON
+ * object; user is expected to dequeue from the DPCON channel
+ */
+enum dpni_dest {
+ DPNI_DEST_NONE = 0,
+ DPNI_DEST_DPIO = 1,
+ DPNI_DEST_DPCON = 2
+};
+
+
+/**
+ * struct dpni_queue - Queue structure
+ * @user_context: User data, presented to the user along with any frames
+ * from this queue. Not relevant for Tx queues.
+ */
+struct dpni_queue {
+ /**
+ * struct destination - Destination structure
+ * @id: ID of the destination, only relevant if DEST_TYPE is > 0.
+ * Identifies either a DPIO or a DPCON object.
+ * Not relevant for Tx queues.
+ * @type: May be one of the following:
+ * 0 - No destination, queue can be manually
+ * queried, but will not push traffic or
+ * notifications to a DPIO;
+ * 1 - The destination is a DPIO. When traffic
+ * becomes available in the queue a FQDAN
+ * (FQ data available notification) will be
+ * generated to selected DPIO;
+ * 2 - The destination is a DPCON. The queue is
+ * associated with a DPCON object for the
+ * purpose of scheduling between multiple
+ * queues. The DPCON may be independently
+ * configured to generate notifications.
+ * Not relevant for Tx queues.
+ * @hold_active: Hold active, maintains a queue scheduled for longer
+ * in a DPIO during dequeue to reduce spread of traffic.
+ * Only relevant if queues are
+ * not affined to a single DPIO.
+ */
+ struct {
+ uint16_t id;
+ enum dpni_dest type;
+ char hold_active;
+ uint8_t priority;
+ } destination;
+ uint64_t user_context;
+ /**
+ * struct flc - FD FLow Context structure
+ * @value: FLC value to set
+ * @stash_control: Boolean, indicates whether the 6 least
+ * significant bits are used for stash control.
+ */
+ struct {
+ uint64_t value;
+ char stash_control;
+ } flc;
+};
+
+/**
+ * struct dpni_queue_id - Queue identification, used for enqueue commands
+ * or queue control
+ * @fqid: FQID used for enqueueing to and/or configuration of this
+ * specific FQ
+ * @qdbin: Queueing bin, used to enqueue using QDID, DQBIN, QPRI.
+ * Only relevant for Tx queues.
+ */
+struct dpni_queue_id {
+ uint32_t fqid;
+ uint16_t qdbin;
+};
+
+/**
+ * enum dpni_confirmation_mode - Defines DPNI options supported for Tx
+ * confirmation
+ * @DPNI_CONF_AFFINE: For each Tx queue set associated with a sender there is
+ * an affine Tx Confirmation queue
+ * @DPNI_CONF_SINGLE: All Tx queues are associated with a single Tx
+ * confirmation queue
+ * @DPNI_CONF_DISABLE: Tx frames are not confirmed. This must be associated
+ * with proper FD set-up to have buffers released to a Buffer Pool, otherwise
+ * buffers will be leaked
+ */
+enum dpni_confirmation_mode {
+ DPNI_CONF_AFFINE,
+ DPNI_CONF_SINGLE,
+ DPNI_CONF_DISABLE,
+};
+
+/**
+ * dpni_set_tx_confirmation_mode() - Set Tx confirmation mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @mode: Tx confirmation mode
+ *
+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not
+ * selected at DPNI creation.
+ * Calling this function with 'mode' set to DPNI_CONF_DISABLE disables all
+ * transmit confirmation (including the private confirmation queues), regardless
+ * of previous settings; Note that in this case, Tx error frames are still
+ * enqueued to the general transmit errors queue.
+ * Calling this function with 'mode' set to DPNI_CONF_SINGLE switches all
+ * Tx confirmations to a shared Tx conf queue. The ID of the queue when
+ * calling dpni_set/get_queue is -1.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_tx_confirmation_mode(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_confirmation_mode mode);
+
+/**
+ * dpni_get_api_version() - Get Data Path Network Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path network interface API
+ * @minor_ver: Minor version of data path network interface API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
+
+/**
+ * Set User Context
+ */
+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Set queue destination configuration
+ */
+#define DPNI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Set FD[FLC] configuration for traffic on this queue. Note that FLC values
+ * set with dpni_add_fs_entry, if any, take precedence over values per queue.
+ */
+#define DPNI_QUEUE_OPT_FLC 0x00000004
+
+/**
+ * Set the queue to hold active mode. This prevents the queue from being
+ * rescheduled between DPIOs while it carries traffic and is active on one
+ * DPNI. Can help reduce reordering when servicing one queue on multiple
+ * CPUs, but the queue is also less likely to push data to multiple CPUs
+ * especially when congested.
+ */
+#define DPNI_QUEUE_OPT_HOLD_ACTIVE 0x00000008
+
+/**
+ * dpni_set_queue() - Set queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported, although
+ * the command is ignored for Tx
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set
+ * allocated for the same TC. Value must be in
+ * range 0 to NUM_QUEUES - 1
+ * @options: A combination of DPNI_QUEUE_OPT_ values that control
+ * what configuration options are set on the queue
+ * @queue: Queue configuration structure
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_set_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ uint8_t options,
+ const struct dpni_queue *queue);
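A sketch that points Rx queue 0 of TC 0 at a DPIO and attaches a user context;
the 'dpio_id' and 'my_rxq' names are assumptions for illustration:

    struct dpni_queue q_cfg = {0};

    q_cfg.destination.type = DPNI_DEST_DPIO;
    q_cfg.destination.id = dpio_id;                      /* target DPIO object id */
    q_cfg.destination.priority = 1;
    q_cfg.user_context = (uint64_t)(uintptr_t)my_rxq;    /* returned with each frame */

    dpni_set_queue(mc_io, 0, token, DPNI_QUEUE_RX, 0, 0,
                   DPNI_QUEUE_OPT_DEST | DPNI_QUEUE_OPT_USER_CTX, &q_cfg);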
+
+/**
+ * dpni_get_queue() - Get queue parameters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @qtype: Type of queue - all queue types are supported
+ * @tc: Traffic class, in range 0 to NUM_TCS - 1
+ * @index: Selects the specific queue out of the set allocated
+ * for the same TC. Value must be in range 0 to
+ * NUM_QUEUES - 1
+ * @queue: Queue configuration structure
+ * @qid: Queue identification
+ *
+ * This function returns current queue configuration which can be changed by
+ * calling dpni_set_queue, and queue identification information.
+ * Returned qid.fqid and/or qid.qdbin values can be used to:
+ * - enqueue traffic for Tx queues,
+ * - perform volatile dequeue for Rx and, if applicable, Tx confirmation
+ * clean-up,
+ * - retrieve queue state.
+ *
+ * All these operations are supported through the DPIO run-time API.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ enum dpni_queue_type qtype,
+ uint8_t tc,
+ uint8_t index,
+ struct dpni_queue *queue,
+ struct dpni_queue_id *qid);
+
+/**
+ * dpni_get_statistics() - Get DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @page: Selects the statistics page to retrieve, see
+ * DPNI_GET_STATISTICS output.
+ * Pages are numbered 0 to 2.
+ * @stat: Structure containing the statistics
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_get_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t page,
+ union dpni_statistics *stat);
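Reading the page-0 ingress counters, for instance, reduces to the following sketch:

    union dpni_statistics stats;

    if (dpni_get_statistics(mc_io, 0, token, 0, &stats) == 0) {
            /* stats.page_0.ingress_all_frames and .ingress_all_bytes
             * now hold the ingress totals for this DPNI */
    }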
+
+/**
+ * dpni_reset_statistics() - Clears DPNI statistics
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_reset_statistics(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
+
+#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/dpaa2/mc/fsl_dpni_cmd.h b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
new file mode 100644
index 00000000..bb92ea89
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_dpni_cmd.h
@@ -0,0 +1,334 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright (c) 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPNI_CMD_H
+#define _FSL_DPNI_CMD_H
+
+/* DPNI Version */
+#define DPNI_VER_MAJOR 7
+#define DPNI_VER_MINOR 0
+
+/* Command IDs */
+#define DPNI_CMDID_OPEN ((0x801 << 4) | (0x1))
+#define DPNI_CMDID_CLOSE ((0x800 << 4) | (0x1))
+#define DPNI_CMDID_CREATE ((0x901 << 4) | (0x1))
+#define DPNI_CMDID_DESTROY ((0x981 << 4) | (0x1))
+#define DPNI_CMDID_GET_API_VERSION ((0xa01 << 4) | (0x1))
+
+#define DPNI_CMDID_ENABLE ((0x002 << 4) | (0x1))
+#define DPNI_CMDID_DISABLE ((0x003 << 4) | (0x1))
+#define DPNI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
+#define DPNI_CMDID_RESET ((0x005 << 4) | (0x1))
+#define DPNI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
+
+#define DPNI_CMDID_SET_POOLS ((0x200 << 4) | (0x1))
+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR ((0x20B << 4) | (0x1))
+
+#define DPNI_CMDID_GET_QDID ((0x210 << 4) | (0x1))
+#define DPNI_CMDID_GET_LINK_STATE ((0x215 << 4) | (0x1))
+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH ((0x216 << 4) | (0x1))
+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH ((0x217 << 4) | (0x1))
+
+#define DPNI_CMDID_SET_UNICAST_PROMISC ((0x222 << 4) | (0x1))
+#define DPNI_CMDID_GET_UNICAST_PROMISC ((0x223 << 4) | (0x1))
+#define DPNI_CMDID_SET_PRIM_MAC ((0x224 << 4) | (0x1))
+#define DPNI_CMDID_GET_PRIM_MAC ((0x225 << 4) | (0x1))
+
+#define DPNI_CMDID_SET_RX_TC_DIST ((0x235 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_STATISTICS ((0x25D << 4) | (0x1))
+#define DPNI_CMDID_RESET_STATISTICS ((0x25E << 4) | (0x1))
+#define DPNI_CMDID_GET_QUEUE ((0x25F << 4) | (0x1))
+#define DPNI_CMDID_SET_QUEUE ((0x260 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_PORT_MAC_ADDR ((0x263 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_BUFFER_LAYOUT ((0x264 << 4) | (0x1))
+#define DPNI_CMDID_SET_BUFFER_LAYOUT ((0x265 << 4) | (0x1))
+
+#define DPNI_CMDID_GET_OFFLOAD ((0x26B << 4) | (0x1))
+#define DPNI_CMDID_SET_OFFLOAD ((0x26C << 4) | (0x1))
+#define DPNI_CMDID_SET_TX_CONFIRMATION_MODE ((0x266 << 4) | (0x1))
+#define DPNI_CMDID_GET_TX_CONFIRMATION_MODE ((0x26D << 4) | (0x1))
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_OPEN(cmd, dpni_id) \
+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_CREATE(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, (cfg)->options); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, (cfg)->num_queues); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, (cfg)->num_tcs); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, (cfg)->mac_filter_entries); \
+ MC_CMD_OP(cmd, 1, 0, 8, uint8_t, (cfg)->vlan_filter_entries); \
+ MC_CMD_OP(cmd, 1, 16, 8, uint8_t, (cfg)->qos_entries); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (cfg)->fs_entries); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_POOLS(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \
+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \
+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \
+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \
+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \
+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \
+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \
+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \
+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \
+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \
+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\
+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \
+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\
+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\
+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\
+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\
+ MC_CMD_OP(cmd, 3, 0, 32, int, cfg->pools[5].dpbp_id); \
+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\
+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \
+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\
+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \
+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_IS_ENABLED(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* DPNI_CMD_GET_ATTR is not used, no input parameters */
+
+#define DPNI_RSP_GET_ATTR(cmd, attr) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, (attr)->options); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, (attr)->num_queues); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, (attr)->num_tcs); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, (attr)->mac_filter_entries); \
+ MC_RSP_OP(cmd, 1, 0, 8, uint8_t, (attr)->vlan_filter_entries); \
+ MC_RSP_OP(cmd, 1, 16, 8, uint8_t, (attr)->qos_entries); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, (attr)->fs_entries); \
+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, (attr)->qos_key_size); \
+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, (attr)->fs_key_size); \
+ MC_RSP_OP(cmd, 2, 16, 16, uint16_t, (attr)->wriop_version); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \
+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \
+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \
+} while (0)
+
+#define DPNI_CMD_GET_BUFFER_LAYOUT(cmd, qtype) \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype)
+
+#define DPNI_RSP_GET_BUFFER_LAYOUT(cmd, layout) \
+do { \
+ MC_RSP_OP(cmd, 0, 48, 1, char, (layout)->pass_timestamp); \
+ MC_RSP_OP(cmd, 0, 49, 1, char, (layout)->pass_parser_result); \
+ MC_RSP_OP(cmd, 0, 50, 1, char, (layout)->pass_frame_status); \
+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, (layout)->private_data_size); \
+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, (layout)->data_align); \
+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, (layout)->data_head_room); \
+ MC_RSP_OP(cmd, 1, 48, 16, uint16_t, (layout)->data_tail_room); \
+} while (0)
+
+#define DPNI_CMD_SET_BUFFER_LAYOUT(cmd, qtype, layout) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, (layout)->options); \
+ MC_CMD_OP(cmd, 0, 48, 1, char, (layout)->pass_timestamp); \
+ MC_CMD_OP(cmd, 0, 49, 1, char, (layout)->pass_parser_result); \
+ MC_CMD_OP(cmd, 0, 50, 1, char, (layout)->pass_frame_status); \
+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, (layout)->private_data_size); \
+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, (layout)->data_align); \
+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, (layout)->data_head_room); \
+ MC_CMD_OP(cmd, 1, 48, 16, uint16_t, (layout)->data_tail_room); \
+} while (0)
+
+#define DPNI_CMD_SET_OFFLOAD(cmd, type, config) \
+do { \
+ MC_CMD_OP(cmd, 0, 24, 8, enum dpni_offload, type); \
+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, config); \
+} while (0)
+
+#define DPNI_CMD_GET_OFFLOAD(cmd, type) \
+ MC_CMD_OP(cmd, 0, 24, 8, enum dpni_offload, type)
+
+#define DPNI_RSP_GET_OFFLOAD(cmd, config) \
+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, config)
+
+#define DPNI_CMD_GET_QDID(cmd, qtype) \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_QDID(cmd, qdid) \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid)
+
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_GET_STATISTICS(cmd, page) \
+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, page)
+
+#define DPNI_RSP_GET_STATISTICS(cmd, stat) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, (stat)->raw.counter[0]); \
+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, (stat)->raw.counter[1]); \
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (stat)->raw.counter[2]); \
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (stat)->raw.counter[3]); \
+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, (stat)->raw.counter[4]); \
+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, (stat)->raw.counter[5]); \
+ MC_RSP_OP(cmd, 6, 0, 64, uint64_t, (stat)->raw.counter[6]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \
+do { \
+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \
+ MC_CMD_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \
+ MC_RSP_OP(cmd, 0, 0, 1, int, en)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \
+do { \
+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \
+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \
+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \
+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \
+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \
+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \
+} while (0)
+
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \
+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \
+ MC_CMD_OP(cmd, 0, 28, 4, enum dpni_fs_miss_action, \
+ cfg->fs_cfg.miss_action); \
+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \
+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \
+} while (0)
+
+#define DPNI_CMD_GET_QUEUE(cmd, qtype, tc, index) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, index); \
+} while (0)
+
+#define DPNI_RSP_GET_QUEUE(cmd, queue, queue_id) \
+do { \
+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, (queue)->destination.id); \
+ MC_RSP_OP(cmd, 1, 48, 8, uint8_t, (queue)->destination.priority); \
+ MC_RSP_OP(cmd, 1, 56, 4, enum dpni_dest, (queue)->destination.type); \
+ MC_RSP_OP(cmd, 1, 62, 1, char, (queue)->flc.stash_control); \
+ MC_RSP_OP(cmd, 1, 63, 1, char, (queue)->destination.hold_active); \
+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, (queue)->flc.value); \
+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, (queue)->user_context); \
+ MC_RSP_OP(cmd, 4, 0, 32, uint32_t, (queue_id)->fqid); \
+ MC_RSP_OP(cmd, 4, 32, 16, uint16_t, (queue_id)->qdbin); \
+} while (0)
+
+#define DPNI_CMD_SET_QUEUE(cmd, qtype, tc, index, options, queue) \
+do { \
+ MC_CMD_OP(cmd, 0, 0, 8, enum dpni_queue_type, qtype); \
+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc); \
+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, index); \
+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, options); \
+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, (queue)->destination.id); \
+ MC_CMD_OP(cmd, 1, 48, 8, uint8_t, (queue)->destination.priority); \
+ MC_CMD_OP(cmd, 1, 56, 4, enum dpni_dest, (queue)->destination.type); \
+ MC_CMD_OP(cmd, 1, 62, 1, char, (queue)->flc.stash_control); \
+ MC_CMD_OP(cmd, 1, 63, 1, char, (queue)->destination.hold_active); \
+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, (queue)->flc.value); \
+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, (queue)->user_context); \
+} while (0)
+
+/* cmd, param, offset, width, type, arg_name */
+#define DPNI_RSP_GET_API_VERSION(cmd, major, minor) \
+do { \
+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
+} while (0)
+
+
+#define DPNI_CMD_SET_TX_CONFIRMATION_MODE(cmd, mode) \
+ MC_CMD_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
+
+#define DPNI_RSP_GET_TX_CONFIRMATION_MODE(cmd, mode) \
+ MC_RSP_OP(cmd, 0, 32, 8, enum dpni_confirmation_mode, mode)
+
+#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/dpaa2/mc/fsl_net.h b/drivers/net/dpaa2/mc/fsl_net.h
new file mode 100644
index 00000000..ef7e4dac
--- /dev/null
+++ b/drivers/net/dpaa2/mc/fsl_net.h
@@ -0,0 +1,487 @@
+/*-
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * BSD LICENSE
+ *
+ * Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_NET_H
+#define __FSL_NET_H
+
+#define LAST_HDR_INDEX 0xFFFFFFFF
+
+/*****************************************************************************/
+/* Protocol fields */
+/*****************************************************************************/
+
+/************************* Ethernet fields *********************************/
+#define NH_FLD_ETH_DA (1)
+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1)
+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2)
+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3)
+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4)
+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5)
+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1)
+
+#define NH_FLD_ETH_ADDR_SIZE 6
+
+/*************************** VLAN fields ***********************************/
+#define NH_FLD_VLAN_VPRI (1)
+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1)
+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2)
+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3)
+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4)
+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1)
+
+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \
+ NH_FLD_VLAN_CFI | \
+ NH_FLD_VLAN_VID)
+
+/************************ IP (generic) fields ******************************/
+#define NH_FLD_IP_VER (1)
+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2)
+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3)
+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4)
+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5)
+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6)
+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7)
+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8)
+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1)
+
+#define NH_FLD_IP_PROTO_SIZE 1
+
+/***************************** IPV4 fields *********************************/
+#define NH_FLD_IPV4_VER (1)
+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1)
+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2)
+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 3)
+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4)
+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5)
+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6)
+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7)
+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8)
+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9)
+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10)
+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11)
+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12)
+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13)
+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14)
+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1)
+
+#define NH_FLD_IPV4_ADDR_SIZE 4
+#define NH_FLD_IPV4_PROTO_SIZE 1
+
+/***************************** IPV6 fields *********************************/
+#define NH_FLD_IPV6_VER (1)
+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1)
+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2)
+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3)
+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4)
+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5)
+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6)
+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7)
+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1)
+
+#define NH_FLD_IPV6_ADDR_SIZE 16
+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1
+
+/***************************** ICMP fields *********************************/
+#define NH_FLD_ICMP_TYPE (1)
+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1)
+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2)
+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3)
+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4)
+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1)
+
+#define NH_FLD_ICMP_CODE_SIZE 1
+#define NH_FLD_ICMP_TYPE_SIZE 1
+
+/***************************** IGMP fields *********************************/
+#define NH_FLD_IGMP_VERSION (1)
+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1)
+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2)
+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3)
+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1)
+
+/***************************** TCP fields **********************************/
+#define NH_FLD_TCP_PORT_SRC (1)
+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1)
+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2)
+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3)
+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4)
+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5)
+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6)
+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7)
+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8)
+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9)
+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10)
+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1)
+
+#define NH_FLD_TCP_PORT_SIZE 2
+
+/***************************** UDP fields **********************************/
+#define NH_FLD_UDP_PORT_SRC (1)
+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1)
+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2)
+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3)
+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_UDP_PORT_SIZE 2
+
+/*************************** UDP-lite fields *******************************/
+#define NH_FLD_UDP_LITE_PORT_SRC (1)
+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1)
+#define NH_FLD_UDP_LITE_ALL_FIELDS \
+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1)
+
+#define NH_FLD_UDP_LITE_PORT_SIZE 2
+
+/*************************** UDP-encap-ESP fields **************************/
+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1)
+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1)
+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2)
+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3)
+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4)
+#define NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5)
+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \
+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1)
+
+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2
+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_PORT_SRC (1)
+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1)
+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2)
+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3)
+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1)
+
+#define NH_FLD_SCTP_PORT_SIZE 2
+
+/***************************** DCCP fields *********************************/
+#define NH_FLD_DCCP_PORT_SRC (1)
+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1)
+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1)
+
+#define NH_FLD_DCCP_PORT_SIZE 2
+
+/***************************** IPHC fields *********************************/
+#define NH_FLD_IPHC_CID (1)
+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1)
+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2)
+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3)
+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4)
+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1)
+
+/***************************** SCTP fields *********************************/
+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1)
+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1)
+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2)
+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4)
+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5)
+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6)
+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7)
+#define NH_FLD_SCTP_CHUNK_DATA_BEGINNING (NH_FLD_SCTP_CHUNK_DATA_TYPE << 8)
+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9)
+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \
+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1)
+
+/*************************** L2TPV2 fields *********************************/
+#define NH_FLD_L2TPV2_TYPE_BIT (1)
+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1)
+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2)
+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3)
+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4)
+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5)
+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6)
+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7)
+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8)
+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9)
+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10)
+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11)
+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12)
+#define NH_FLD_L2TPV2_ALL_FIELDS \
+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1)
+
+/*************************** L2TPV3 fields *********************************/
+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4)
+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5)
+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6)
+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7)
+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8)
+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1)
+
+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1)
+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1)
+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2)
+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3)
+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \
+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1)
+
+/**************************** PPP fields ***********************************/
+#define NH_FLD_PPP_PID (1)
+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1)
+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1)
+
+/************************** PPPoE fields ***********************************/
+#define NH_FLD_PPPOE_VER (1)
+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1)
+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2)
+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3)
+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4)
+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5)
+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6)
+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1)
+
+/************************* PPP-Mux fields **********************************/
+#define NH_FLD_PPPMUX_PID (1)
+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1)
+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2)
+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1)
+
+/*********************** PPP-Mux sub-frame fields **************************/
+#define NH_FLD_PPPMUX_SUBFRM_PFF (1)
+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1)
+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2)
+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3)
+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4)
+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \
+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1)
+
+/*************************** LLC fields ************************************/
+#define NH_FLD_LLC_DSAP (1)
+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1)
+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2)
+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1)
+
+/*************************** NLPID fields **********************************/
+#define NH_FLD_NLPID_NLPID (1)
+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1)
+
+/*************************** SNAP fields ***********************************/
+#define NH_FLD_SNAP_OUI (1)
+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1)
+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1)
+
+/*************************** LLC SNAP fields *******************************/
+#define NH_FLD_LLC_SNAP_TYPE (1)
+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1)
+
+#define NH_FLD_ARP_HTYPE (1)
+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1)
+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2)
+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3)
+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4)
+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5)
+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6)
+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7)
+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8)
+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1)
+
+/*************************** RFC2684 fields ********************************/
+#define NH_FLD_RFC2684_LLC (1)
+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1)
+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2)
+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3)
+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4)
+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5)
+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1)
+
+/*************************** User defined fields ***************************/
+#define NH_FLD_USER_DEFINED_SRCPORT (1)
+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1)
+#define NH_FLD_USER_DEFINED_ALL_FIELDS \
+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1)
+
+/*************************** Payload fields ********************************/
+#define NH_FLD_PAYLOAD_BUFFER (1)
+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1)
+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2)
+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3)
+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4)
+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5)
+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1)
+
+/*************************** GRE fields ************************************/
+#define NH_FLD_GRE_TYPE (1)
+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1)
+
+/*************************** MINENCAP fields *******************************/
+#define NH_FLD_MINENCAP_SRC_IP (1)
+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1)
+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2)
+#define NH_FLD_MINENCAP_ALL_FIELDS \
+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1)
+
+/*************************** IPSEC AH fields *******************************/
+#define NH_FLD_IPSEC_AH_SPI (1)
+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1)
+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1)
+
+/*************************** IPSEC ESP fields ******************************/
+#define NH_FLD_IPSEC_ESP_SPI (1)
+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1)
+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1)
+
+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4
+
+/*************************** MPLS fields ***********************************/
+#define NH_FLD_MPLS_LABEL_STACK (1)
+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \
+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1)
+
+/*************************** MACSEC fields *********************************/
+#define NH_FLD_MACSEC_SECTAG (1)
+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1)
+
+/*************************** GTP fields ************************************/
+#define NH_FLD_GTP_TEID (1)
+
+/* Protocol options */
+
+/* Ethernet options */
+#define NH_OPT_ETH_BROADCAST 1
+#define NH_OPT_ETH_MULTICAST 2
+#define NH_OPT_ETH_UNICAST 3
+#define NH_OPT_ETH_BPDU 4
+
+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01)
+/* also applicable for broadcast */
+
+/* VLAN options */
+#define NH_OPT_VLAN_CFI 1
+
+/* IPV4 options */
+#define NH_OPT_IPV4_UNICAST 1
+#define NH_OPT_IPV4_MULTICAST 2
+#define NH_OPT_IPV4_BROADCAST 3
+#define NH_OPT_IPV4_OPTION 4
+#define NH_OPT_IPV4_FRAG 5
+#define NH_OPT_IPV4_INITIAL_FRAG 6
+
+/* IPV6 options */
+#define NH_OPT_IPV6_UNICAST 1
+#define NH_OPT_IPV6_MULTICAST 2
+#define NH_OPT_IPV6_OPTION 3
+#define NH_OPT_IPV6_FRAG 4
+#define NH_OPT_IPV6_INITIAL_FRAG 5
+
+/* General IP options (may be used for any version) */
+#define NH_OPT_IP_FRAG 1
+#define NH_OPT_IP_INITIAL_FRAG 2
+#define NH_OPT_IP_OPTION 3
+
+/* Minenc. options */
+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1
+
+/* GRE. options */
+#define NH_OPT_GRE_ROUTING_PRESENT 1
+
+/* TCP options */
+#define NH_OPT_TCP_OPTIONS 1
+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2
+#define NH_OPT_TCP_CONTROL_LOW_BITS 3
+
+/* CAPWAP options */
+#define NH_OPT_CAPWAP_DTLS 1
+
+enum net_prot {
+ NET_PROT_NONE = 0,
+ NET_PROT_PAYLOAD,
+ NET_PROT_ETH,
+ NET_PROT_VLAN,
+ NET_PROT_IPV4,
+ NET_PROT_IPV6,
+ NET_PROT_IP,
+ NET_PROT_TCP,
+ NET_PROT_UDP,
+ NET_PROT_UDP_LITE,
+ NET_PROT_IPHC,
+ NET_PROT_SCTP,
+ NET_PROT_SCTP_CHUNK_DATA,
+ NET_PROT_PPPOE,
+ NET_PROT_PPP,
+ NET_PROT_PPPMUX,
+ NET_PROT_PPPMUX_SUBFRM,
+ NET_PROT_L2TPV2,
+ NET_PROT_L2TPV3_CTRL,
+ NET_PROT_L2TPV3_SESS,
+ NET_PROT_LLC,
+ NET_PROT_LLC_SNAP,
+ NET_PROT_NLPID,
+ NET_PROT_SNAP,
+ NET_PROT_MPLS,
+ NET_PROT_IPSEC_AH,
+ NET_PROT_IPSEC_ESP,
+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */
+ NET_PROT_MACSEC,
+ NET_PROT_GRE,
+ NET_PROT_MINENCAP,
+ NET_PROT_DCCP,
+ NET_PROT_ICMP,
+ NET_PROT_IGMP,
+ NET_PROT_ARP,
+ NET_PROT_CAPWAP_DATA,
+ NET_PROT_CAPWAP_CTRL,
+ NET_PROT_RFC2684,
+ NET_PROT_ICMPV6,
+ NET_PROT_FCOE,
+ NET_PROT_FIP,
+ NET_PROT_ISCSI,
+ NET_PROT_GTP,
+ NET_PROT_USER_DEFINED_L2,
+ NET_PROT_USER_DEFINED_L3,
+ NET_PROT_USER_DEFINED_L4,
+ NET_PROT_USER_DEFINED_L5,
+ NET_PROT_USER_DEFINED_SHIM1,
+ NET_PROT_USER_DEFINED_SHIM2,
+
+ NET_PROT_DUMMY_LAST
+};
+
+/*! IEEE 802.1Q */
+#define NH_IEEE8021Q_ETYPE 0x8100
+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \
+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \
+ (((uint32_t)(pcp & 0x07)) << 13) | \
+ (((uint32_t)(dei & 0x01)) << 12) | \
+ (((uint32_t)(vlan_id & 0xFFF))))
+
+#endif /* __FSL_NET_H */
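Every protocol group above follows the same convention: the first field is bit 0 of a 32-bit selector and each *_ALL_FIELDS value is (first_flag << n) - 1, i.e. the union of all n flags in that group, while NH_IEEE8021Q_HDR() packs a complete 802.1Q tag into a host-order 32-bit word. A minimal usage sketch, assuming fsl_net.h is on the include path; the field choices and tag values are purely illustrative:

/* Minimal sketch of the fsl_net.h helpers above; values are examples only. */
#include <stdint.h>
#include <stdio.h>
#include "fsl_net.h"

int main(void)
{
	/* Select two PPPoE fields; any combination is a subset of ALL_FIELDS. */
	uint32_t fields = NH_FLD_PPPOE_VER | NH_FLD_PPPOE_SID;

	if ((fields & NH_FLD_PPPOE_ALL_FIELDS) != fields)
		return 1;	/* cannot happen: ALL_FIELDS covers every flag */

	/* Build an 802.1Q tag word: TPID 0x8100, PCP 5, DEI 0, VLAN ID 100. */
	uint32_t tag = NH_IEEE8021Q_HDR(NH_IEEE8021Q_ETYPE, 5, 0, 100);

	printf("fields=0x%x tag=0x%08x\n", fields, tag);	/* tag = 0x8100a064 */
	return 0;
}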
diff --git a/drivers/net/mpipe/rte_pmd_mpipe_version.map b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
index ad607bbe..8591cc0b 100644
--- a/drivers/net/mpipe/rte_pmd_mpipe_version.map
+++ b/drivers/net/dpaa2/rte_pmd_dpaa2_version.map
@@ -1,3 +1,4 @@
-DPDK_2.2 {
+DPDK_17.05 {
+
local: *;
};
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index 57a60f0f..b5592d6b 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -57,6 +57,9 @@ CFLAGS_BASE_DRIVER += -Wno-unused-variable
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-misleading-indentation
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
endif
@@ -96,9 +99,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_EM_PMD) += em_rxtx.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_E1000_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/e1000/base/README b/drivers/net/e1000/base/README
index 8d48135a..de1ae4cf 100644
--- a/drivers/net/e1000/base/README
+++ b/drivers/net/e1000/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,30 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This directory contains source code of FreeBSD em & igb drivers of version
-cid-shared-code.2015.10.09 released by ND. The sub-directory of base/
+cid-shared-code.2016.11.22 released by ND. The sub-directory of base/
contains the original source package.
+This driver is valid for the product(s) listed below:
+* Intel® Ethernet Controller 82540
+* Intel® Ethernet Controller 82545 Series
+* Intel® Ethernet Controller 82546 Series
+* Intel® Ethernet Controller 82571 Series
+* Intel® Ethernet Controller 82572 Series
+* Intel® Ethernet Controller 82573
+* Intel® Ethernet Controller 82574
+* Intel® Ethernet Controller 82583
+* Intel® Ethernet Controller I217 Series
+* Intel® Ethernet Controller I218 Series
+* Intel® Ethernet Controller I219 Series
+* Intel® Ethernet Controller 82576 Series
+* Intel® Ethernet Controller 82575 Series
+* Intel® Ethernet Controller 82580 Series
+* Intel® Ethernet Controller I350 Series
+* Intel® Ethernet Controller I210 Series
+* Intel® Ethernet Controller I211
+* Intel® Ethernet Controller I354 Series
+* Intel® Ethernet Controller DH89XXCC Series
+
Updating the driver
===================
diff --git a/drivers/net/e1000/base/e1000_82575.c b/drivers/net/e1000/base/e1000_82575.c
index 723885d7..c6400bde 100644
--- a/drivers/net/e1000/base/e1000_82575.c
+++ b/drivers/net/e1000/base/e1000_82575.c
@@ -100,7 +100,6 @@ STATIC s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
u16 offset);
STATIC s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
STATIC s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
-STATIC void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
STATIC void e1000_clear_vfta_i350(struct e1000_hw *hw);
STATIC void e1000_i2c_start(struct e1000_hw *hw);
diff --git a/drivers/net/e1000/base/e1000_82575.h b/drivers/net/e1000/base/e1000_82575.h
index c4986841..4133cdd8 100644
--- a/drivers/net/e1000/base/e1000_82575.h
+++ b/drivers/net/e1000/base/e1000_82575.h
@@ -492,6 +492,7 @@ enum e1000_promisc_type {
void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
void e1000_rlpml_set_vf(struct e1000_hw *, u16);
s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
u16 e1000_rxpbs_adjust_82580(u32 data);
s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data);
s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M);
diff --git a/drivers/net/e1000/base/e1000_api.c b/drivers/net/e1000/base/e1000_api.c
index bbfcae88..f7cf83b6 100644
--- a/drivers/net/e1000/base/e1000_api.c
+++ b/drivers/net/e1000/base/e1000_api.c
@@ -298,6 +298,23 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_I218_V3:
mac->type = e1000_pch_lpt;
break;
+ case E1000_DEV_ID_PCH_SPT_I219_LM:
+ case E1000_DEV_ID_PCH_SPT_I219_V:
+ case E1000_DEV_ID_PCH_SPT_I219_LM2:
+ case E1000_DEV_ID_PCH_SPT_I219_V2:
+ case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
+ mac->type = e1000_pch_spt;
+ break;
+ case E1000_DEV_ID_PCH_CNP_I219_LM6:
+ case E1000_DEV_ID_PCH_CNP_I219_V6:
+ case E1000_DEV_ID_PCH_CNP_I219_LM7:
+ case E1000_DEV_ID_PCH_CNP_I219_V7:
+ mac->type = e1000_pch_cnp;
+ break;
case E1000_DEV_ID_82575EB_COPPER:
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82575GB_QUAD_COPPER:
@@ -448,6 +465,8 @@ s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
e1000_init_function_pointers_ich8lan(hw);
break;
case e1000_82575:
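Taken together, the two hunks above are all that is needed to light up a new PCH generation in the base code: e1000_set_mac_type() translates the new Sunrise Point / Cannon Point device IDs into e1000_pch_spt / e1000_pch_cnp, and e1000_setup_init_funcs() then routes both types to the existing ich8lan init path. A hedged sketch of that probe-time flow, using the function signatures shown above; probe_sketch() itself is illustrative and not part of the patch:

/* Illustrative only: shows how the new mac types reach the ich8lan init code. */
static s32 probe_sketch(struct e1000_hw *hw)
{
	s32 ret;

	/* New device IDs (e.g. E1000_DEV_ID_PCH_SPT_I219_LM, 0x156F) now map
	 * to mac->type = e1000_pch_spt in e1000_set_mac_type().
	 */
	ret = e1000_set_mac_type(hw);
	if (ret != E1000_SUCCESS)
		return ret;

	/* e1000_pch_spt/e1000_pch_cnp fall into the ICH8/PCH branch, so the
	 * ich8lan MAC/PHY/NVM function pointers get installed.
	 */
	return e1000_setup_init_funcs(hw, true);
}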
diff --git a/drivers/net/e1000/base/e1000_defines.h b/drivers/net/e1000/base/e1000_defines.h
index 69aa1f23..dbc2bbbe 100644
--- a/drivers/net/e1000/base/e1000_defines.h
+++ b/drivers/net/e1000/base/e1000_defines.h
@@ -198,6 +198,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */
#define E1000_RCTL_RDMTS_HEX 0x00010000
+#define E1000_RCTL_RDMTS1_HEX E1000_RCTL_RDMTS_HEX
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
@@ -468,6 +469,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define ETHERNET_FCS_SIZE 4
#define MAX_JUMBO_FRAME_SIZE 0x3F00
+/* The datasheet maximum supported RX size is 9.5KB (9728 bytes) */
+#define MAX_RX_JUMBO_FRAME_SIZE 0x2600
#define E1000_TX_PTR_GAP 0x1F
/* Extended Configuration Control and Size */
@@ -751,6 +754,12 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
+/* HH Time Sync */
+#define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
+#define E1000_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
+#define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */
+#define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */
+
#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */
#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
diff --git a/drivers/net/e1000/base/e1000_hw.h b/drivers/net/e1000/base/e1000_hw.h
index e4e4f764..d9de9fc1 100644
--- a/drivers/net/e1000/base/e1000_hw.h
+++ b/drivers/net/e1000/base/e1000_hw.h
@@ -136,6 +136,19 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_I218_V2 0x15A1
#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* Sunrise Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* Sunrise Point PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */
+#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7
+#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
+#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
+#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
+#define E1000_DEV_ID_PCH_CNP_I219_LM6 0x15BD
+#define E1000_DEV_ID_PCH_CNP_I219_V6 0x15BE
+#define E1000_DEV_ID_PCH_CNP_I219_LM7 0x15BB
+#define E1000_DEV_ID_PCH_CNP_I219_V7 0x15BC
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
@@ -221,6 +234,8 @@ enum e1000_mac_type {
e1000_pchlan,
e1000_pch2lan,
e1000_pch_lpt,
+ e1000_pch_spt,
+ e1000_pch_cnp,
e1000_82575,
e1000_82576,
e1000_82580,
@@ -950,11 +965,15 @@ struct e1000_dev_spec_ich8lan {
E1000_MUTEX nvm_mutex;
E1000_MUTEX swflag_mutex;
bool nvm_k1_enabled;
+ bool disable_k1_off;
bool eee_disable;
u16 eee_lp_ability;
#ifdef ULP_SUPPORT
enum e1000_ulp_state ulp_state;
-#endif /* NAHUM6LP_HW && ULP_SUPPORT */
+ bool ulp_capability_disabled;
+ bool during_suspend_flow;
+ bool during_dpg_exit;
+#endif /* ULP_SUPPORT */
u16 lat_enc;
u16 max_ltr_enc;
bool smbus_disable;
diff --git a/drivers/net/e1000/base/e1000_ich8lan.c b/drivers/net/e1000/base/e1000_ich8lan.c
index 89d07e90..6dd046d2 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.c
+++ b/drivers/net/e1000/base/e1000_ich8lan.c
@@ -94,10 +94,13 @@ STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
bool active);
STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
+STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data);
STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
+STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
u16 *data);
STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
@@ -125,6 +128,14 @@ STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
u32 offset, u8 *data);
STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
u8 size, u16 *data);
+STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data);
+STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 *data);
+STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 data);
+STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword);
STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
u32 offset, u16 *data);
STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
@@ -233,7 +244,7 @@ STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (ret_val)
return false;
out:
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
/* Only unforce SMBus if ME is not active */
if (!(E1000_READ_REG(hw, E1000_FWSM) &
E1000_ICH_FWSM_FW_VALID)) {
@@ -277,7 +288,7 @@ STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
E1000_WRITE_FLUSH(hw);
- usec_delay(10);
+ msec_delay(1);
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
E1000_WRITE_FLUSH(hw);
@@ -334,6 +345,8 @@ STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -481,6 +494,8 @@ STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -623,36 +638,57 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
u32 gfpreg, sector_base_addr, sector_end_addr;
u16 i;
+ u32 nvm_size;
DEBUGFUNC("e1000_init_nvm_params_ich8lan");
- /* Can't read flash registers if the register set isn't mapped. */
nvm->type = e1000_nvm_flash_sw;
- if (!hw->flash_address) {
- DEBUGOUT("ERROR: Flash registers not mapped\n");
- return -E1000_ERR_CONFIG;
- }
- gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
+ if (hw->mac.type >= e1000_pch_spt) {
+ /* in SPT, gfpreg doesn't exist. NVM size is taken from the
+ * STRAP register. This is because in SPT the GbE Flash region
+ * is no longer accessed through the flash registers. Instead,
+ * the mechanism has changed, and the Flash region access
+ * registers are now implemented in GbE memory space.
+ */
+ nvm->flash_base_addr = 0;
+ nvm_size =
+ (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
+ * NVM_SIZE_MULTIPLIER;
+ nvm->flash_bank_size = nvm_size / 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ /* Set the base address for flash register access */
+ hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
+ } else {
+ /* Can't read flash registers if register set isn't mapped. */
+ if (!hw->flash_address) {
+ DEBUGOUT("ERROR: Flash registers not mapped\n");
+ return -E1000_ERR_CONFIG;
+ }
- /* sector_X_addr is a "sector"-aligned address (4096 bytes)
- * Add 1 to sector_end_addr since this sector is included in
- * the overall size.
- */
- sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
- sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+ gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
- /* flash_base_addr is byte-aligned */
- nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
+ * Add 1 to sector_end_addr since this sector is included in
+ * the overall size.
+ */
+ sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+ sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
- /* find total size of the NVM, then cut in half since the total
- * size represents two separate NVM banks.
- */
- nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
- << FLASH_SECTOR_ADDR_SHIFT);
- nvm->flash_bank_size /= 2;
- /* Adjust to word count */
- nvm->flash_bank_size /= sizeof(u16);
+ /* flash_base_addr is byte-aligned */
+ nvm->flash_base_addr = sector_base_addr
+ << FLASH_SECTOR_ADDR_SHIFT;
+
+ /* find total size of the NVM, then cut in half since the total
+ * size represents two separate NVM banks.
+ */
+ nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+ << FLASH_SECTOR_ADDR_SHIFT);
+ nvm->flash_bank_size /= 2;
+ /* Adjust to word count */
+ nvm->flash_bank_size /= sizeof(u16);
+ }
nvm->word_size = E1000_SHADOW_RAM_WORDS;
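On SPT and later the GbE flash region is reached through GbE memory space, so the geometry comes from the 5-bit NVMS field in E1000_STRAP rather than from GFPREG, as the new branch above shows. A worked numeric sketch of that sizing, with a hypothetical STRAP value:

/* Hedged sketch of the SPT sizing above; the STRAP value is invented. */
u32 strap      = 0x0000000E;                 /* NVMS field (bits 5:1) = 7      */
u32 nvm_size   = (((strap >> 1) & 0x1F) + 1) * NVM_SIZE_MULTIPLIER;
                                             /* (7 + 1) * 4096 = 32768 bytes   */
u32 bank_bytes = nvm_size / 2;               /* two banks -> 16384 bytes each  */
u32 bank_words = bank_bytes / sizeof(u16);   /* 8192 16-bit words per bank     */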
@@ -668,8 +704,13 @@ STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* Function Pointers */
nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
nvm->ops.release = e1000_release_nvm_ich8lan;
- nvm->ops.read = e1000_read_nvm_ich8lan;
- nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
+ if (hw->mac.type >= e1000_pch_spt) {
+ nvm->ops.read = e1000_read_nvm_spt;
+ nvm->ops.update = e1000_update_nvm_checksum_spt;
+ } else {
+ nvm->ops.read = e1000_read_nvm_ich8lan;
+ nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
+ }
nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
nvm->ops.write = e1000_write_nvm_ich8lan;
@@ -758,6 +799,8 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch2lan;
/* fall-through */
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
/* multicast address update for pch2 */
mac->ops.update_mc_addr_list =
@@ -768,7 +811,13 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
/* save PCH revision_id */
e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
- hw->revision_id = (u8)(pci_cfg &= 0x000F);
+ /* SPT uses full byte for revision ID,
+ * as opposed to previous generations
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hw->revision_id = (u8)(pci_cfg &= 0x00FF);
+ else
+ hw->revision_id = (u8)(pci_cfg &= 0x000F);
#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -786,7 +835,7 @@ STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
break;
}
- if (mac->type == e1000_pch_lpt) {
+ if (mac->type >= e1000_pch_lpt) {
mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
@@ -1015,8 +1064,9 @@ release:
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
- if (!link || ((status & E1000_STATUS_SPEED_100) &&
- (status & E1000_STATUS_FD)))
+ if ((hw->phy.revision > 5) || !link ||
+ ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
goto update_fextnvm6;
ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
@@ -1068,6 +1118,7 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
u32 mac_reg;
s32 ret_val = E1000_SUCCESS;
u16 phy_reg;
+ u16 oem_reg = 0;
if ((hw->mac.type < e1000_pch_lpt) ||
(hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
@@ -1128,6 +1179,25 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
+ /* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
+ * LPLU and disable Gig speed when entering ULP
+ */
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
+ ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ &oem_reg);
+ if (ret_val)
+ goto release;
+
+ phy_reg = oem_reg;
+ phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
+
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ phy_reg);
+
+ if (ret_val)
+ goto release;
+ }
+
skip_smbus:
if (!to_sx) {
/* Change the 'Link Status Change' interrupt to trigger
@@ -1184,6 +1254,14 @@ skip_smbus:
E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
}
+ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
+ to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+ ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
+ oem_reg);
+ if (ret_val)
+ goto release;
+ }
+
release:
hw->phy.ops.release(hw);
out:
@@ -1240,10 +1318,10 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
}
- /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
+ /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
while (E1000_READ_REG(hw, E1000_FWSM) &
E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 10) {
+ if (i++ == 30) {
ret_val = -E1000_ERR_PHY;
goto out;
}
@@ -1343,6 +1421,8 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
I218_ULP_CONFIG1_RESET_TO_SMBUS |
I218_ULP_CONFIG1_WOL_HOST |
I218_ULP_CONFIG1_INBAND_EXIT |
+ I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
+ I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
I218_ULP_CONFIG1_DISABLE_SMB_PERST);
e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
@@ -1360,6 +1440,8 @@ s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
if (hw->mac.autoneg)
e1000_phy_setup_autoneg(hw);
+ else
+ e1000_setup_copper_link_generic(hw);
e1000_sw_lcd_config_ich8lan(hw);
@@ -1397,6 +1479,8 @@ out:
}
#endif /* ULP_SUPPORT */
+
+
/**
* e1000_check_for_copper_link_ich8lan - Check for link (Copper)
* @hw: pointer to the HW structure
@@ -1456,8 +1540,7 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* aggressive resulting in many collisions. To avoid this, increase
* the IPG and reduce Rx latency in the PHY.
*/
- if (((hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_pch_lpt)) && link) {
+ if ((hw->mac.type >= e1000_pch2lan) && link) {
u16 speed, duplex;
e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
@@ -1468,6 +1551,10 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
tipg_reg |= 0xFF;
/* Reduce Rx latency in analog PHY */
emi_val = 0;
+ } else if (hw->mac.type >= e1000_pch_spt &&
+ duplex == FULL_DUPLEX && speed != SPEED_1000) {
+ tipg_reg |= 0xC;
+ emi_val = 1;
} else {
/* Roll back the default values */
tipg_reg |= 0x08;
@@ -1486,10 +1573,78 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
emi_addr = I217_RX_CONFIG;
ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
+
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u16 phy_reg;
+
+ hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
+ &phy_reg);
+ phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
+ if (speed == SPEED_100 || speed == SPEED_10)
+ phy_reg |= 0x3E8;
+ else
+ phy_reg |= 0xFA;
+ hw->phy.ops.write_reg_locked(hw,
+ I217_PLL_CLOCK_GATE_REG,
+ phy_reg);
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
+ }
hw->phy.ops.release(hw);
if (ret_val)
return ret_val;
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ u16 data;
+ u16 ptr_gap;
+
+ if (speed == SPEED_1000) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.read_reg_locked(hw,
+ PHY_REG(776, 20),
+ &data);
+ if (ret_val) {
+ hw->phy.ops.release(hw);
+ return ret_val;
+ }
+
+ ptr_gap = (data & (0x3FF << 2)) >> 2;
+ if (ptr_gap < 0x18) {
+ data &= ~(0x3FF << 2);
+ data |= (0x18 << 2);
+ ret_val =
+ hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(776, 20), data);
+ }
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+ } else {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = hw->phy.ops.write_reg_locked(hw,
+ PHY_REG(776, 20),
+ 0xC023);
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ return ret_val;
+
+ }
+ }
}
/* I217 Packet Loss issue:
@@ -1497,7 +1652,7 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* on power up.
* Set the Beacon Duration for I217 to 8 usec
*/
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
u32 mac_reg;
mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
@@ -1519,10 +1674,29 @@ STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
hw->dev_spec.ich8lan.eee_lp_ability = 0;
/* Configure K0s minimum time */
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
}
+ if (hw->mac.type >= e1000_pch_lpt) {
+ u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
+
+ if (hw->mac.type == e1000_pch_spt) {
+ /* FEXTNVM6 K1-off workaround - for SPT only */
+ u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
+
+ if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
+ else
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+ }
+
+ if (hw->dev_spec.ich8lan.disable_k1_off == true)
+ fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
+
+ E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
+ }
+
if (!link)
return E1000_SUCCESS; /* No link detected */
@@ -1616,6 +1790,8 @@ void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
break;
default:
@@ -2081,6 +2257,8 @@ STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -3204,6 +3382,41 @@ STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
switch (hw->mac.type) {
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
+ bank1_offset = nvm->flash_bank_size;
+ act_offset = E1000_ICH_NVM_SIG_WORD;
+
+ /* set bank to 0 in case flash read fails */
+ *bank = 0;
+
+ /* Check bank 0 */
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+ sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 0;
+ return E1000_SUCCESS;
+ }
+
+ /* Check bank 1 */
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
+ bank1_offset,
+ &nvm_dword);
+ if (ret_val)
+ return ret_val;
+ sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
+ if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+ E1000_ICH_NVM_SIG_VALUE) {
+ *bank = 1;
+ return E1000_SUCCESS;
+ }
+
+ DEBUGOUT("ERROR: No valid NVM bank present\n");
+ return -E1000_ERR_NVM;
case e1000_ich8lan:
case e1000_ich9lan:
eecd = E1000_READ_REG(hw, E1000_EECD);
@@ -3251,6 +3464,99 @@ STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
}
/**
+ * e1000_read_nvm_spt - NVM access for SPT
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the word(s) to read.
+ * @words: Size of data to read in words.
+ * @data: pointer to the word(s) to read at offset.
+ *
+ * Reads a word(s) from the NVM
+ **/
+STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 act_offset;
+ s32 ret_val = E1000_SUCCESS;
+ u32 bank = 0;
+ u32 dword = 0;
+ u16 offset_to_read;
+ u16 i;
+
+ DEBUGFUNC("e1000_read_nvm_spt");
+
+ if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+ (words == 0)) {
+ DEBUGOUT("nvm parameter(s) out of bounds\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+ nvm->ops.acquire(hw);
+
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ act_offset = (bank) ? nvm->flash_bank_size : 0;
+ act_offset += offset;
+
+ ret_val = E1000_SUCCESS;
+
+ for (i = 0; i < words; i += 2) {
+ if (words - i == 1) {
+ if (dev_spec->shadow_ram[offset+i].modified) {
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ } else {
+ offset_to_read = act_offset + i -
+ ((act_offset + i) % 2);
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ if ((act_offset + i) % 2 == 0)
+ data[i] = (u16)(dword & 0xFFFF);
+ else
+ data[i] = (u16)((dword >> 16) & 0xFFFF);
+ }
+ } else {
+ offset_to_read = act_offset + i;
+ if (!(dev_spec->shadow_ram[offset+i].modified) ||
+ !(dev_spec->shadow_ram[offset+i+1].modified)) {
+ ret_val =
+ e1000_read_flash_dword_ich8lan(hw,
+ offset_to_read,
+ &dword);
+ if (ret_val)
+ break;
+ }
+ if (dev_spec->shadow_ram[offset+i].modified)
+ data[i] = dev_spec->shadow_ram[offset+i].value;
+ else
+ data[i] = (u16) (dword & 0xFFFF);
+ if (dev_spec->shadow_ram[offset+i].modified)
+ data[i+1] =
+ dev_spec->shadow_ram[offset+i+1].value;
+ else
+ data[i+1] = (u16) (dword >> 16 & 0xFFFF);
+ }
+ }
+
+ nvm->ops.release(hw);
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM read error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
* e1000_read_nvm_ich8lan - Read word(s) from the NVM
* @hw: pointer to the HW structure
* @offset: The offset (in bytes) of the word(s) to read.
@@ -3337,7 +3643,11 @@ STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
/* Clear FCERR and DAEL in hw status by writing 1 */
hsfsts.hsf_status.flcerr = 1;
hsfsts.hsf_status.dael = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
@@ -3353,7 +3663,12 @@ STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
ret_val = E1000_SUCCESS;
} else {
s32 i;
@@ -3375,8 +3690,12 @@ STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
- hsfsts.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval & 0xFFFF);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
+ hsfsts.regval);
} else {
DEBUGOUT("Flash controller busy, cannot get access\n");
}
@@ -3401,10 +3720,17 @@ STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
DEBUGFUNC("e1000_flash_cycle_ich8lan");
/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
- hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcgo = 1;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
/* wait till FDONE bit is set to 1 */
do {
@@ -3421,6 +3747,29 @@ STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
}
/**
+ * e1000_read_flash_dword_ich8lan - Read dword from flash
+ * @hw: pointer to the HW structure
+ * @offset: offset to data location
+ * @data: pointer to the location for storing the data
+ *
+ * Reads the flash dword at offset into data. Offset is converted
+ * to bytes before read.
+ **/
+STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ DEBUGFUNC("e1000_read_flash_dword_ich8lan");
+
+ if (!data)
+ return -E1000_ERR_NVM;
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+
+ return e1000_read_flash_data32_ich8lan(hw, offset, data);
+}
+
+/**
* e1000_read_flash_word_ich8lan - Read word from flash
* @hw: pointer to the HW structure
* @offset: offset to data location
@@ -3457,7 +3806,13 @@ STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
s32 ret_val;
u16 word = 0;
- ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+ /* In SPT, only 32 bits access is supported,
+ * so this function should not be called.
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ else
+ ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
if (ret_val)
return ret_val;
@@ -3543,6 +3898,83 @@ STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+/**
+ * e1000_read_flash_data32_ich8lan - Read dword from NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the dword to read.
+ * @data: Pointer to the dword to store the value read.
+ *
+ * Reads a dword from the NVM using the flash access registers.
+ **/
+STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 *data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val = -E1000_ERR_NVM;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
+ hw->mac.type < e1000_pch_spt)
+ return -E1000_ERR_NVM;
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+
+ /* fldbcount is transfer size - 1; sizeof(u32) - 1 selects a 4-byte read. */
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+ /* In SPT, This register is in Lan memory space, not flash.
+ * Therefore, only 32 bit access is supported
+ */
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ (u32)hsflctl.regval << 16);
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+ /* Check if FCERR is set to 1, if set to 1, clear it
+ * and try the whole sequence a few more times, else
+ * read in (shift in) the Flash Data0, the order is
+ * least significant byte first msb to lsb
+ */
+ if (ret_val == E1000_SUCCESS) {
+ *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+ break;
+ } else {
+ /* If we've gotten here, then things are probably
+ * completely hosed, but if the error condition is
+ * detected, it won't hurt to give it another try...
+ * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFSTS);
+ if (hsfsts.hsf_status.flcerr) {
+ /* Repeat for some time before giving up. */
+ continue;
+ } else if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
/**
* e1000_write_nvm_ich8lan - Write word(s) to the NVM
@@ -3581,6 +4013,175 @@ STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
+ * e1000_update_nvm_checksum_spt - Update the checksum for NVM
+ * @hw: pointer to the HW structure
+ *
+ * The NVM checksum is updated by calling the generic update_nvm_checksum,
+ * which writes the checksum to the shadow ram. The changes in the shadow
+ * ram are then committed to the EEPROM by processing each bank at a time
+ * checking for the modified bit and writing only the pending changes.
+ * After a successful commit, the shadow ram is cleared and is ready for
+ * future writes.
+ **/
+STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
+{
+ struct e1000_nvm_info *nvm = &hw->nvm;
+ struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+ u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+ s32 ret_val;
+ u32 dword = 0;
+
+ DEBUGFUNC("e1000_update_nvm_checksum_spt");
+
+ ret_val = e1000_update_nvm_checksum_generic(hw);
+ if (ret_val)
+ goto out;
+
+ if (nvm->type != e1000_nvm_flash_sw)
+ goto out;
+
+ nvm->ops.acquire(hw);
+
+ /* We're writing to the opposite bank so if we're on bank 1,
+ * write to bank 0 etc. We also need to erase the segment that
+ * is going to be written
+ */
+ ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+ if (ret_val != E1000_SUCCESS) {
+ DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
+ bank = 0;
+ }
+
+ if (bank == 0) {
+ new_bank_offset = nvm->flash_bank_size;
+ old_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+ if (ret_val)
+ goto release;
+ } else {
+ old_bank_offset = nvm->flash_bank_size;
+ new_bank_offset = 0;
+ ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+ if (ret_val)
+ goto release;
+ }
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
+ /* Determine whether to write the value stored
+ * in the other NVM bank or a modified value stored
+ * in the shadow RAM
+ */
+ ret_val = e1000_read_flash_dword_ich8lan(hw,
+ i + old_bank_offset,
+ &dword);
+
+ if (dev_spec->shadow_ram[i].modified) {
+ dword &= 0xffff0000;
+ dword |= (dev_spec->shadow_ram[i].value & 0xffff);
+ }
+ if (dev_spec->shadow_ram[i + 1].modified) {
+ dword &= 0x0000ffff;
+ dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
+ << 16);
+ }
+ if (ret_val)
+ break;
+
+ /* If the word is 0x13, then make sure the signature bits
+ * (15:14) are 11b until the commit has completed.
+ * This will allow us to write 10b which indicates the
+ * signature is valid. We want to do this after the write
+ * has completed so that we don't mark the segment valid
+ * while the write is still in progress
+ */
+ if (i == E1000_ICH_NVM_SIG_WORD - 1)
+ dword |= E1000_ICH_NVM_SIG_MASK << 16;
+
+ /* Convert offset to bytes. */
+ act_offset = (i + new_bank_offset) << 1;
+
+ usec_delay(100);
+
+ /* Write the data to the new bank. Offset in words*/
+ act_offset = i + new_bank_offset;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
+ dword);
+ if (ret_val)
+ break;
+ }
+
+ /* Don't bother writing the segment valid bits if sector
+ * programming failed.
+ */
+ if (ret_val) {
+ DEBUGOUT("Flash commit failed.\n");
+ goto release;
+ }
+
+ /* Finally validate the new segment by setting bit 15:14
+ * to 10b in word 0x13 , this can be done without an
+ * erase as well since these bits are 11 to start with
+ * and we need to change bit 14 to 0b
+ */
+ act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+
+ /*offset in words but we read dword*/
+ --act_offset;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0xBFFFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* And invalidate the previously valid segment by setting
+ * its signature word (0x13) high_byte to 0b. This can be
+ * done without an erase because flash erase sets all bits
+ * to 1's. We can write 1's to 0's without an erase
+ */
+ act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+
+ /* offset in words but we read dword*/
+ act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
+ ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
+
+ if (ret_val)
+ goto release;
+
+ dword &= 0x00FFFFFF;
+ ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
+
+ if (ret_val)
+ goto release;
+
+ /* Great! Everything worked, we can now clear the cached entries. */
+ for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+ dev_spec->shadow_ram[i].modified = false;
+ dev_spec->shadow_ram[i].value = 0xFFFF;
+ }
+
+release:
+ nvm->ops.release(hw);
+
+ /* Reload the EEPROM, or else modifications will not appear
+ * until after the next adapter reset.
+ */
+ if (!ret_val) {
+ nvm->ops.reload(hw);
+ msec_delay(10);
+ }
+
+out:
+ if (ret_val)
+ DEBUGOUT1("NVM update error: %d\n", ret_val);
+
+ return ret_val;
+}
+
+/**
* e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
* @hw: pointer to the HW structure
*
@@ -3757,6 +4358,8 @@ STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
*/
switch (hw->mac.type) {
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
@@ -3804,8 +4407,13 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
DEBUGFUNC("e1000_write_ich8_data");
- if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
- return -E1000_ERR_NVM;
+ if (hw->mac.type >= e1000_pch_spt) {
+ if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ } else {
+ if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
hw->nvm.flash_base_addr);
@@ -3816,12 +4424,29 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_init_ich8lan(hw);
if (ret_val != E1000_SUCCESS)
break;
- hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval =
+ E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
hsflctl.hsf_ctrl.fldbcount = size - 1;
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
@@ -3859,6 +4484,94 @@ STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
return ret_val;
}
+/**
+* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
+* @hw: pointer to the HW structure
+* @offset: The offset (in bytes) of the dword to write.
+* @data: The 4 bytes to write to the NVM.
+*
+* Writes a dword (4 bytes) to the NVM using the flash access registers.
+**/
+STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
+ u32 data)
+{
+ union ich8_hws_flash_status hsfsts;
+ union ich8_hws_flash_ctrl hsflctl;
+ u32 flash_linear_addr;
+ s32 ret_val;
+ u8 count = 0;
+
+ DEBUGFUNC("e1000_write_flash_data32_ich8lan");
+
+ if (hw->mac.type >= e1000_pch_spt) {
+ if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
+ return -E1000_ERR_NVM;
+ }
+ flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+ hw->nvm.flash_base_addr);
+ do {
+ usec_delay(1);
+ /* Steps */
+ ret_val = e1000_flash_cycle_init_ich8lan(hw);
+ if (ret_val != E1000_SUCCESS)
+ break;
+
+ /* In SPT, This register is in Lan memory space, not
+ * flash. Therefore, only 32 bit access is supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval = E1000_READ_FLASH_REG(hw,
+ ICH_FLASH_HSFSTS)
+ >> 16;
+ else
+ hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFCTL);
+
+ hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
+ hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+
+ /* In SPT, This register is in Lan memory space,
+ * not flash. Therefore, only 32 bit access is
+ * supported
+ */
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
+
+ /* check if FCERR is set to 1 , if set to 1, clear it
+ * and try the whole sequence a few more times else done
+ */
+ ret_val = e1000_flash_cycle_ich8lan(hw,
+ ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+
+ if (ret_val == E1000_SUCCESS)
+ break;
+
+ /* If we're here, then things are most likely
+ * completely hosed, but if the error condition
+ * is detected, it won't hurt to give it another
+ * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+ */
+ hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+ if (hsfsts.hsf_status.flcerr)
+ /* Repeat for some time before giving up. */
+ continue;
+ if (!hsfsts.hsf_status.flcdone) {
+ DEBUGOUT("Timeout error - flash cycle did not complete.\n");
+ break;
+ }
+ } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+ return ret_val;
+}
/**
* e1000_write_flash_byte_ich8lan - Write a single byte to NVM
@@ -3878,7 +4591,42 @@ STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
}
+/**
+* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
+* @hw: pointer to the HW structure
+* @offset: The offset of the word to write.
+* @dword: The dword to write to the NVM.
+*
+* Writes a single dword to the NVM using the flash access registers.
+* Goes through a retry algorithm before giving up.
+**/
+STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
+ u32 offset, u32 dword)
+{
+ s32 ret_val;
+ u16 program_retries;
+ DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
+
+ /* Must convert word offset into bytes. */
+ offset <<= 1;
+
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+
+ if (!ret_val)
+ return ret_val;
+ for (program_retries = 0; program_retries < 100; program_retries++) {
+ DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
+ usec_delay(100);
+ ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ }
+ if (program_retries == 100)
+ return -E1000_ERR_NVM;
+
+ return E1000_SUCCESS;
+}
/**
* e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
@@ -3988,12 +4736,22 @@ STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
- hsflctl.regval =
- E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+ if (hw->mac.type >= e1000_pch_spt)
+ hsflctl.regval =
+ E1000_READ_FLASH_REG(hw,
+ ICH_FLASH_HSFSTS)>>16;
+ else
+ hsflctl.regval =
+ E1000_READ_FLASH_REG16(hw,
+ ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
- E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
- hsflctl.regval);
+ if (hw->mac.type >= e1000_pch_spt)
+ E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
+ hsflctl.regval << 16);
+ else
+ E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
+ hsflctl.regval);
/* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
@@ -4426,7 +5184,7 @@ STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_RFCTL, reg);
/* Enable ECC on Lynxpoint */
- if (hw->mac.type == e1000_pch_lpt) {
+ if (hw->mac.type >= e1000_pch_lpt) {
reg = E1000_READ_REG(hw, E1000_PBECCSTS);
reg |= E1000_PBECCSTS_ECC_ENABLE;
E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
@@ -4858,7 +5616,8 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
(device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
(device_id == E1000_DEV_ID_PCH_I218_LM3) ||
- (device_id == E1000_DEV_ID_PCH_I218_V3)) {
+ (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+ (hw->mac.type >= e1000_pch_spt)) {
u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
E1000_WRITE_REG(hw, E1000_FEXTNVM6,
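A pattern that repeats throughout the ich8lan.c changes above: on SPT and newer parts the flash controller registers live in GbE memory space and only 32-bit accesses are allowed, with the old 16-bit HSFCTL register now occupying the upper half of the 32-bit HSFSTS register. That is why every former HSFCTL read becomes a read of HSFSTS shifted right by 16, and every write shifts the control word back up by 16. A minimal sketch of that read-modify-write, mirroring the hunks above (not a drop-in replacement):

/* Hedged sketch of the SPT-and-later 32-bit HSFCTL access used above. */
static void spt_kick_flash_cycle_sketch(struct e1000_hw *hw)
{
	union ich8_hws_flash_ctrl hsflctl;

	/* Control word lives in the high 16 bits of HSFSTS on SPT+. */
	hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
	hsflctl.hsf_ctrl.flcgo = 1;

	/* Low half (status) is written as zero, exactly as the hunks above do. */
	E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
			      (u32)hsflctl.regval << 16);
}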
diff --git a/drivers/net/e1000/base/e1000_ich8lan.h b/drivers/net/e1000/base/e1000_ich8lan.h
index 33e77fb8..bc4ed1dd 100644
--- a/drivers/net/e1000/base/e1000_ich8lan.h
+++ b/drivers/net/e1000/base/e1000_ich8lan.h
@@ -121,6 +121,18 @@ POSSIBILITY OF SUCH DAMAGE.
#if !defined(EXTERNAL_RELEASE) || defined(ULP_SUPPORT)
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
+
+#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field*/
+#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs*/
+#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
+#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
@@ -198,6 +210,10 @@ POSSIBILITY OF SUCH DAMAGE.
#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */
#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */
#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */
+/* enable ULP even if when phy powered down via lanphypc */
+#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400
+/* disable clear of sticky ULP on PERST */
+#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800
#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */
#endif /* !EXTERNAL_RELEASE || ULP_SUPPORT */
@@ -234,9 +250,12 @@ POSSIBILITY OF SUCH DAMAGE.
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
+#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
+#define I217_PLL_CLOCK_GATE_MASK 0x07FF
+
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
/* Inband Control */
diff --git a/drivers/net/e1000/base/e1000_mbx.c b/drivers/net/e1000/base/e1000_mbx.c
index 6daf16b0..a92fd22e 100644
--- a/drivers/net/e1000/base/e1000_mbx.c
+++ b/drivers/net/e1000/base/e1000_mbx.c
@@ -430,15 +430,21 @@ STATIC s32 e1000_check_for_rst_vf(struct e1000_hw *hw,
STATIC s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
{
s32 ret_val = -E1000_ERR_MBX;
+ int count = 10;
DEBUGFUNC("e1000_obtain_mbx_lock_vf");
- /* Take ownership of the buffer */
- E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
- /* reserve mailbox for vf use */
- if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
- ret_val = E1000_SUCCESS;
+ /* reserve mailbox for vf use */
+ if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
return ret_val;
}
@@ -645,18 +651,26 @@ STATIC s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
{
s32 ret_val = -E1000_ERR_MBX;
u32 p2v_mailbox;
+ int count = 10;
DEBUGFUNC("e1000_obtain_mbx_lock_pf");
- /* Take ownership of the buffer */
- E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+ do {
+ /* Take ownership of the buffer */
+ E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number),
+ E1000_P2VMAILBOX_PFU);
- /* reserve mailbox for vf use */
- p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
- if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
- ret_val = E1000_SUCCESS;
+ /* reserve mailbox for pf use */
+ p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+ if (p2v_mailbox & E1000_P2VMAILBOX_PFU) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ usec_delay(1000);
+ } while (count-- > 0);
return ret_val;
+
}
/**
diff --git a/drivers/net/e1000/base/e1000_nvm.c b/drivers/net/e1000/base/e1000_nvm.c
index 762acd16..75c22827 100644
--- a/drivers/net/e1000/base/e1000_nvm.c
+++ b/drivers/net/e1000/base/e1000_nvm.c
@@ -1295,6 +1295,7 @@ void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
+ case e1000_i354:
hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
/* Use this format, unless EETRACK ID exists,
* then use alternate format
diff --git a/drivers/net/e1000/base/e1000_osdep.h b/drivers/net/e1000/base/e1000_osdep.h
index 47a19481..b8868049 100644
--- a/drivers/net/e1000/base/e1000_osdep.h
+++ b/drivers/net/e1000/base/e1000_osdep.h
@@ -44,6 +44,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_byteorder.h>
+#include <rte_io.h>
#include "../e1000_logs.h"
@@ -94,17 +95,18 @@ typedef int bool;
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
-#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define E1000_PCI_REG(reg) rte_read32(reg)
-#define E1000_PCI_REG16(reg) (*((volatile uint16_t *)(reg)))
+#define E1000_PCI_REG16(reg) rte_read16(reg)
-#define E1000_PCI_REG_WRITE(reg, value) do { \
- E1000_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
-} while (0)
+#define E1000_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
-#define E1000_PCI_REG_WRITE16(reg, value) do { \
- E1000_PCI_REG16((reg)) = (rte_cpu_to_le_16(value)); \
-} while (0)
+#define E1000_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
+
+#define E1000_PCI_REG_WRITE16(reg, value) \
+ rte_write16((rte_cpu_to_le_16(value)), reg)
#define E1000_PCI_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
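With the osdep rework above, all MMIO goes through the rte_io accessors: reads map to rte_read32()/rte_read16(), ordered writes to rte_write32()/rte_write16(), and the new E1000_PCI_REG_WRITE_RELAXED() maps to rte_write32_relaxed() for hot paths that handle ordering themselves. A short usage sketch, assuming a valid hw pointer; the register offset 0x2818 and the values are illustrative only:

/* Illustrative use of the rewritten accessors; offset and value are examples. */
volatile uint32_t *addr = E1000_PCI_REG_ADDR(hw, 0x2818 /* example offset */);

E1000_PCI_REG_WRITE(addr, 16);         /* rte_write32(): ordered MMIO write   */
E1000_PCI_REG_WRITE_RELAXED(addr, 16); /* rte_write32_relaxed(): no barrier   */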
diff --git a/drivers/net/e1000/base/e1000_regs.h b/drivers/net/e1000/base/e1000_regs.h
index 84531a99..364a7261 100644
--- a/drivers/net/e1000/base/e1000_regs.h
+++ b/drivers/net/e1000/base/e1000_regs.h
@@ -66,6 +66,8 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
@@ -109,6 +111,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
#define E1000_PBS 0x01008 /* Packet Buffer Size */
#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */
+#define E1000_IOSFPC 0x00F28 /* TX corrupted data */
#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
#define E1000_EEMNGCTL_I210 0x01010 /* i210 MNG EEprom Mode Control */
#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */
@@ -591,6 +594,10 @@ POSSIBILITY OF SUCH DAMAGE.
#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */
#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */
#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */
+#define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */
+#define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */
+#define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */
#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
diff --git a/drivers/net/e1000/base/e1000_vf.c b/drivers/net/e1000/base/e1000_vf.c
index 7845b48e..44ab0188 100644
--- a/drivers/net/e1000/base/e1000_vf.c
+++ b/drivers/net/e1000/base/e1000_vf.c
@@ -421,12 +421,13 @@ void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+ msgbuf[0] = E1000_VF_SET_MULTICAST;
+
if (mc_addr_count > 30) {
msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
mc_addr_count = 30;
}
- msgbuf[0] = E1000_VF_SET_MULTICAST;
msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
for (i = 0; i < mc_addr_count; i++) {
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 6c25c8da..8352d0a7 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -138,6 +138,11 @@
#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define IGB_TX_MAX_SEG UINT8_MAX
+#define IGB_TX_MAX_MTU_SEG UINT8_MAX
+#define EM_TX_MAX_SEG UINT8_MAX
+#define EM_TX_MAX_MTU_SEG UINT8_MAX
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
@@ -286,6 +291,8 @@ struct e1000_adapter {
#define E1000_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct e1000_adapter *)adapter)->filter)
+#define E1000_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
/*
* RX/TX IGB function prototypes
*/
@@ -304,10 +311,15 @@ uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
+int eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt);
+
int eth_igb_rx_init(struct rte_eth_dev *dev);
void eth_igb_tx_init(struct rte_eth_dev *dev);
@@ -315,6 +327,9 @@ void eth_igb_tx_init(struct rte_eth_dev *dev);
uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -365,6 +380,9 @@ uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -376,6 +394,9 @@ void eth_em_tx_init(struct rte_eth_dev *dev);
uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
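The new prototypes above (eth_em/igb_prep_pkts, the descriptor status callbacks and eth_igb_tx_done_cleanup) back generic ethdev entry points added in this release. A hedged application-side sketch of the Tx prepare step, assuming port, queue and mbufs were configured elsewhere:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Validate offload requests through the PMD's tx_pkt_prepare callback
 * before transmitting; on rejection, rte_errno explains why. */
static uint16_t
send_burst(uint16_t port, uint16_t queue, struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port, queue, pkts, n);

	/* pkts[nb_prep] failed preparation when nb_prep < n, e.g. because
	 * of an ol_flags bit outside the PMD's supported offload mask. */
	return rte_eth_tx_burst(port, queue, pkts, nb_prep);
}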
diff --git a/drivers/net/e1000/em_ethdev.c b/drivers/net/e1000/em_ethdev.c
index aee3d340..57eb017c 100644
--- a/drivers/net/e1000/em_ethdev.c
+++ b/drivers/net/e1000/em_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
@@ -83,9 +84,9 @@ static int eth_em_flow_ctrl_set(struct rte_eth_dev *dev,
static int eth_em_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_em_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_em_interrupt_action(struct rte_eth_dev *dev);
-static void eth_em_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int eth_em_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void eth_em_interrupt_handler(void *param);
static int em_hw_init(struct e1000_hw *hw);
static int em_hardware_init(struct e1000_hw *hw);
@@ -119,8 +120,8 @@ static int eth_em_led_on(struct rte_eth_dev *dev);
static int eth_em_led_off(struct rte_eth_dev *dev);
static int em_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void eth_em_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
@@ -168,6 +169,19 @@ static const struct rte_pci_id pci_id_em_map[] = {
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V2) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_LM3) },
{ RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_I218_V3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V2) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_LBG_I219_LM3) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V4) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_LM5) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_SPT_I219_V5) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM6) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V6) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_LM7) },
+ { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_PCH_CNP_I219_V7) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -191,6 +205,8 @@ static const struct eth_dev_ops eth_em_ops = {
.rx_queue_release = eth_em_rx_queue_release,
.rx_queue_count = eth_em_rx_queue_count,
.rx_descriptor_done = eth_em_rx_descriptor_done,
+ .rx_descriptor_status = eth_em_rx_descriptor_status,
+ .tx_descriptor_status = eth_em_tx_descriptor_status,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
@@ -278,6 +294,19 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_I218_LM2:
case E1000_DEV_ID_PCH_I218_V3:
case E1000_DEV_ID_PCH_I218_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM:
+ case E1000_DEV_ID_PCH_SPT_I219_V:
+ case E1000_DEV_ID_PCH_SPT_I219_LM2:
+ case E1000_DEV_ID_PCH_SPT_I219_V2:
+ case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
+ case E1000_DEV_ID_PCH_CNP_I219_LM6:
+ case E1000_DEV_ID_PCH_CNP_I219_V6:
+ case E1000_DEV_ID_PCH_CNP_I219_LM7:
+ case E1000_DEV_ID_PCH_CNP_I219_V7:
return 1;
default:
return 0;
@@ -287,7 +316,8 @@ eth_em_dev_is_ich8(struct e1000_hw *hw)
static int
eth_em_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct e1000_hw *hw =
@@ -295,11 +325,10 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
struct e1000_vfta * shadow_vfta =
E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
-
eth_dev->dev_ops = &eth_em_ops;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
+ eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -312,6 +341,7 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->device_id = pci_dev->id.device_id;
@@ -351,8 +381,8 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&(pci_dev->intr_handle),
- eth_em_interrupt_handler, (void *)eth_dev);
+ rte_intr_callback_register(intr_handle,
+ eth_em_interrupt_handler, eth_dev);
return 0;
}
@@ -360,17 +390,16 @@ eth_em_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -EPERM;
- pci_dev = eth_dev->pci_dev;
-
if (adapter->stopped == 0)
eth_em_close(eth_dev);
@@ -382,24 +411,30 @@ eth_em_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- eth_em_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_em_interrupt_handler, eth_dev);
return 0;
}
-static struct eth_driver rte_em_pmd = {
- .pci_drv = {
- .id_table = pci_id_em_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_em_dev_init,
- .eth_dev_uninit = eth_em_dev_uninit,
- .dev_private_size = sizeof(struct e1000_adapter),
+static int eth_em_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_em_dev_init);
+}
+
+static int eth_em_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_em_dev_uninit);
+}
+
+static struct rte_pci_driver rte_em_pmd = {
+ .id_table = pci_id_em_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_em_pci_probe,
+ .remove = eth_em_pci_remove,
};
static int
@@ -540,6 +575,8 @@ em_set_pba(struct e1000_hw *hw)
case e1000_pchlan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
pba = E1000_PBA_26K;
break;
default:
@@ -556,7 +593,9 @@ eth_em_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE(dev->data->dev_private);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev =
+ E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
uint32_t *speeds;
@@ -609,7 +648,7 @@ eth_em_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
@@ -706,7 +745,7 @@ eth_em_start(struct rte_eth_dev *dev)
(void *)dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplexn");
}
/* check if rxq interrupt is enabled */
if (dev->data->dev_conf.intr_conf.rxq != 0)
@@ -738,7 +777,8 @@ eth_em_stop(struct rte_eth_dev *dev)
{
struct rte_eth_link link;
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_disable(hw);
em_lsc_intr_disable(hw);
@@ -853,7 +893,9 @@ em_hardware_init(struct e1000_hw *hw)
hw->fc.low_water = 0x5048;
hw->fc.pause_time = 0x0650;
hw->fc.refresh_time = 0x0400;
- } else if (hw->mac.type == e1000_pch_lpt) {
+ } else if (hw->mac.type == e1000_pch_lpt ||
+ hw->mac.type == e1000_pch_spt ||
+ hw->mac.type == e1000_pch_cnp) {
hw->fc.requested_mode = e1000_fc_full;
}
@@ -999,9 +1041,11 @@ static int
eth_em_rx_queue_intr_enable(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
em_rxq_intr_enable(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -1026,6 +1070,8 @@ em_get_max_pktlen(const struct e1000_hw *hw)
case e1000_ich10lan:
case e1000_pch2lan:
case e1000_pch_lpt:
+ case e1000_pch_spt:
+ case e1000_pch_cnp:
case e1000_82574:
case e1000_80003es2lan: /* 9K Jumbo Frame size */
case e1000_82583:
@@ -1045,9 +1091,20 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = em_get_max_pktlen(hw);
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
@@ -1079,6 +1136,8 @@ eth_em_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
.nb_align = EM_TXD_ALIGN,
+ .nb_seg_max = EM_TX_MAX_SEG,
+ .nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
};
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
@@ -1536,8 +1595,10 @@ eth_em_interrupt_get_status(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-eth_em_interrupt_action(struct rte_eth_dev *dev)
+eth_em_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
@@ -1550,7 +1611,7 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
return -1;
intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/* set get_link_status to check register later */
hw->mac.get_link_status = 1;
@@ -1571,8 +1632,8 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
PMD_INIT_LOG(INFO, " Port %d: Link Down", dev->data->port_id);
}
PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
tctl = E1000_READ_REG(hw, E1000_TCTL);
rctl = E1000_READ_REG(hw, E1000_RCTL);
@@ -1604,13 +1665,12 @@ eth_em_interrupt_action(struct rte_eth_dev *dev)
* void
*/
static void
-eth_em_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+eth_em_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_em_interrupt_get_status(dev);
- eth_em_interrupt_action(dev);
+ eth_em_interrupt_action(dev, dev->intr_handle);
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -1734,13 +1794,13 @@ eth_em_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
return -EIO;
}
-static void
+static int
eth_em_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, __rte_unused uint32_t pool)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- e1000_rar_set(hw, mac_addr->addr_bytes, index);
+ return e1000_rar_set(hw, mac_addr->addr_bytes, index);
}
static void
@@ -1805,5 +1865,6 @@ eth_em_set_mc_addr_list(struct rte_eth_dev *dev,
return 0;
}
-RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_e1000_em, rte_em_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_em, pci_id_em_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_em, "* igb_uio | uio_pci_generic | vfio");
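The em_ethdev.c changes above migrate the PMD from the old eth_driver wrapper to a plain rte_pci_driver with its own probe/remove callbacks built on the rte_ethdev_pci helpers. A condensed, hedged skeleton of that registration pattern; the "mydrv" names, stub init/uninit callbacks and empty ID table are placeholders, not part of the patch.

#include <rte_ethdev_pci.h>
#include <rte_pci.h>

struct mydrv_adapter { int dummy; };		/* placeholder private data */

static const struct rte_pci_id pci_id_mydrv_map[] = {
	{ .vendor_id = 0, },			/* sentinel only (placeholder) */
};

static int mydrv_dev_init(struct rte_eth_dev *eth_dev __rte_unused)
{ return 0; }

static int mydrv_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{ return 0; }

static int
mydrv_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct mydrv_adapter), mydrv_dev_init);
}

static int
mydrv_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, mydrv_dev_uninit);
}

static struct rte_pci_driver mydrv_pmd = {
	.id_table = pci_id_mydrv_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = mydrv_pci_probe,
	.remove = mydrv_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_mydrv, mydrv_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_mydrv, pci_id_mydrv_map);
RTE_PMD_REGISTER_KMOD_DEP(net_mydrv, "* igb_uio | uio_pci_generic | vfio");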
diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c
index 41f51c0f..31819c5b 100644
--- a/drivers/net/e1000/em_rxtx.c
+++ b/drivers/net/e1000/em_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,6 +66,7 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
@@ -77,6 +78,14 @@
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+#define E1000_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_VLAN_PKT)
+
+#define E1000_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
@@ -610,7 +619,7 @@ end_of_tx:
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -618,6 +627,43 @@ end_of_tx:
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1390,11 +1436,6 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct em_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -1427,6 +1468,57 @@ eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
return !!(rxdp->status & E1000_RXD_STAT_DD);
}
+int
+eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct em_rx_queue *rxq = rx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].status;
+ if (*status & E1000_RXD_STAT_DD)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct em_tx_queue *txq = tx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].upper.fields.status;
+ if (*status & E1000_TXD_STAT_DD)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
em_dev_clear_queues(struct rte_eth_dev *dev)
{
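eth_em_rx_descriptor_status() and eth_em_tx_descriptor_status() above implement the per-queue descriptor status callbacks new in this release; applications reach them through the generic ethdev wrappers. A hedged sketch of a cheap "is work pending" probe, assuming a configured port and queue:

#include <rte_ethdev.h>

/* Probe whether a packet is already completed `lookahead` slots past the
 * queue's read pointer, without pulling anything off the ring. */
static int
rx_work_pending(uint16_t port, uint16_t queue, uint16_t lookahead)
{
	int status = rte_eth_rx_descriptor_status(port, queue, lookahead);

	return status == RTE_ETH_RX_DESC_DONE;
}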
diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c
index 2fddf0cb..e1702d8b 100644
--- a/drivers/net/e1000/igb_ethdev.c
+++ b/drivers/net/e1000/igb_ethdev.c
@@ -45,6 +45,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
@@ -115,11 +116,19 @@ static void eth_igb_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *rte_stats);
static int eth_igb_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
+static int eth_igb_xstats_get_by_id(struct rte_eth_dev *dev,
+ const uint64_t *ids,
+ uint64_t *values, unsigned int n);
static int eth_igb_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- unsigned limit);
+ unsigned int size);
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit);
static void eth_igb_stats_reset(struct rte_eth_dev *dev);
static void eth_igb_xstats_reset(struct rte_eth_dev *dev);
+static int eth_igb_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void eth_igb_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *eth_igb_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -132,9 +141,9 @@ static int eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
-static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
-static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void eth_igb_interrupt_handler(void *param);
static int igb_hardware_init(struct e1000_hw *hw);
static void igb_hw_control_acquire(struct e1000_hw *hw);
static void igb_hw_control_release(struct e1000_hw *hw);
@@ -162,9 +171,9 @@ static int eth_igb_led_off(struct rte_eth_dev *dev);
static void igb_intr_disable(struct e1000_hw *hw);
static int igb_get_rx_buffer_size(struct e1000_hw *hw);
-static void eth_igb_rar_set(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int eth_igb_rar_set(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static void eth_igb_default_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *addr);
@@ -280,8 +289,7 @@ static void eth_igb_assign_msix_vector(struct e1000_hw *hw, int8_t direction,
static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector,
uint8_t index, uint8_t offset);
static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev);
-static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void eth_igbvf_interrupt_handler(void *param);
static void igbvf_mbx_process(struct rte_eth_dev *dev);
/*
@@ -369,6 +377,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
.nb_align = IGB_RXD_ALIGN,
+ .nb_seg_max = IGB_TX_MAX_SEG,
+ .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
static const struct eth_dev_ops eth_igb_ops = {
@@ -385,9 +395,12 @@ static const struct eth_dev_ops eth_igb_ops = {
.link_update = eth_igb_link_update,
.stats_get = eth_igb_stats_get,
.xstats_get = eth_igb_xstats_get,
+ .xstats_get_by_id = eth_igb_xstats_get_by_id,
+ .xstats_get_names_by_id = eth_igb_xstats_get_names_by_id,
.xstats_get_names = eth_igb_xstats_get_names,
.stats_reset = eth_igb_stats_reset,
.xstats_reset = eth_igb_xstats_reset,
+ .fw_version_get = eth_igb_fw_version_get,
.dev_infos_get = eth_igb_infos_get,
.dev_supported_ptypes_get = eth_igb_supported_ptypes_get,
.mtu_set = eth_igb_mtu_set,
@@ -400,8 +413,11 @@ static const struct eth_dev_ops eth_igb_ops = {
.rx_queue_release = eth_igb_rx_queue_release,
.rx_queue_count = eth_igb_rx_queue_count,
.rx_descriptor_done = eth_igb_rx_descriptor_done,
+ .rx_descriptor_status = eth_igb_rx_descriptor_status,
+ .tx_descriptor_status = eth_igb_tx_descriptor_status,
.tx_queue_setup = eth_igb_tx_queue_setup,
.tx_queue_release = eth_igb_tx_queue_release,
+ .tx_done_cleanup = eth_igb_tx_done_cleanup,
.dev_led_on = eth_igb_led_on,
.dev_led_off = eth_igb_led_off,
.flow_ctrl_get = eth_igb_flow_ctrl_get,
@@ -668,15 +684,16 @@ igb_pf_reset_hw(struct e1000_hw *hw)
}
static void
-igb_identify_hardware(struct rte_eth_dev *dev)
+igb_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->vendor_id = dev->pci_dev->id.vendor_id;
- hw->device_id = dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
e1000_set_mac_type(hw);
@@ -743,7 +760,7 @@ static int
eth_igb_dev_init(struct rte_eth_dev *eth_dev)
{
int error = 0;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct e1000_vfta * shadow_vfta =
@@ -755,11 +772,10 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
uint32_t ctrl_ext;
- pci_dev = eth_dev->pci_dev;
-
eth_dev->dev_ops = &eth_igb_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -771,10 +787,11 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev)
}
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->hw_addr= (void *)pci_dev->mem_resource[0].addr;
- igb_identify_hardware(eth_dev);
+ igb_identify_hardware(eth_dev, pci_dev);
if (e1000_setup_init_funcs(hw, FALSE) != E1000_SUCCESS) {
error = -EIO;
goto err_late;
@@ -908,6 +925,7 @@ static int
eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct e1000_hw *hw;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
@@ -918,7 +936,8 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
+ pci_dev = E1000_DEV_TO_PCI(eth_dev);
+ intr_handle = &pci_dev->intr_handle;
if (adapter->stopped == 0)
eth_igb_close(eth_dev);
@@ -937,9 +956,9 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev)
igb_pf_host_uninit(eth_dev);
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- eth_igb_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ eth_igb_interrupt_handler, eth_dev);
return 0;
}
@@ -951,6 +970,7 @@ static int
eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
struct e1000_hw *hw =
@@ -963,6 +983,7 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &igbvf_eth_dev_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -973,9 +994,9 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
+ pci_dev = E1000_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1012,12 +1033,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
/* Generate a random MAC address, if none was assigned by PF. */
if (is_zero_ether_addr(perm_addr)) {
eth_random_addr(perm_addr->addr_bytes);
- diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
- if (diag) {
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
- return diag;
- }
PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
"%02x:%02x:%02x:%02x:%02x:%02x",
@@ -1029,6 +1044,12 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
perm_addr->addr_bytes[5]);
}
+ diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+ if (diag) {
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ return diag;
+ }
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
&eth_dev->data->mac_addrs[0]);
@@ -1038,9 +1059,9 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "igb_mac_82576_vf");
- rte_intr_callback_register(&pci_dev->intr_handle,
- eth_igbvf_interrupt_handler,
- (void *)eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+ rte_intr_callback_register(intr_handle,
+ eth_igbvf_interrupt_handler, eth_dev);
return 0;
}
@@ -1050,7 +1071,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1076,32 +1097,46 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct eth_driver rte_igb_pmd = {
- .pci_drv = {
- .id_table = pci_id_igb_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_igb_dev_init,
- .eth_dev_uninit = eth_igb_dev_uninit,
- .dev_private_size = sizeof(struct e1000_adapter),
+static int eth_igb_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_igb_dev_init);
+}
+
+static int eth_igb_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_igb_dev_uninit);
+}
+
+static struct rte_pci_driver rte_igb_pmd = {
+ .id_table = pci_id_igb_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_igb_pci_probe,
+ .remove = eth_igb_pci_remove,
};
+
+static int eth_igbvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct e1000_adapter), eth_igbvf_dev_init);
+}
+
+static int eth_igbvf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_igbvf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_igbvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_igbvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_igbvf_dev_init,
- .eth_dev_uninit = eth_igbvf_dev_uninit,
- .dev_private_size = sizeof(struct e1000_adapter),
+static struct rte_pci_driver rte_igbvf_pmd = {
+ .id_table = pci_id_igbvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_igbvf_pci_probe,
+ .remove = eth_igbvf_pci_remove,
};
static void
@@ -1217,7 +1252,8 @@ eth_igb_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret, mask;
uint32_t intr_vector = 0;
uint32_t ctrl_ext;
@@ -1281,7 +1317,7 @@ eth_igb_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -1388,7 +1424,7 @@ eth_igb_start(struct rte_eth_dev *dev)
(void *)dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
@@ -1425,11 +1461,12 @@ eth_igb_stop(struct rte_eth_dev *dev)
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_filter_info *filter_info =
E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
struct rte_eth_link link;
struct e1000_flex_filter *p_flex;
struct e1000_5tuple_filter *p_5tuple, *p_5tuple_next;
struct e1000_2tuple_filter *p_2tuple, *p_2tuple_next;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
igb_intr_disable(hw);
@@ -1529,7 +1566,8 @@ eth_igb_close(struct rte_eth_dev *dev)
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
struct rte_eth_link link;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
eth_igb_stop(dev);
adapter->stopped = 1;
@@ -1549,10 +1587,9 @@ eth_igb_close(struct rte_eth_dev *dev)
igb_dev_free_queues(dev);
- pci_dev = dev->pci_dev;
- if (pci_dev->intr_handle.intr_vec) {
- rte_free(pci_dev->intr_handle.intr_vec);
- pci_dev->intr_handle.intr_vec = NULL;
+ if (intr_handle->intr_vec) {
+ rte_free(intr_handle->intr_vec);
+ intr_handle->intr_vec = NULL;
}
memset(&link, 0, sizeof(link));
@@ -1816,7 +1853,7 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev)
static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
- __rte_unused unsigned limit)
+ __rte_unused unsigned int size)
{
unsigned i;
@@ -1833,6 +1870,41 @@ static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return IGB_NB_XSTATS;
}
+static int eth_igb_xstats_get_names_by_id(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+ unsigned int limit)
+{
+ unsigned int i;
+
+ if (!ids) {
+ if (xstats_names == NULL)
+ return IGB_NB_XSTATS;
+
+ for (i = 0; i < IGB_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_igb_stats_strings[i].name);
+
+ return IGB_NB_XSTATS;
+
+ } else {
+ struct rte_eth_xstat_name xstats_names_copy[IGB_NB_XSTATS];
+
+ eth_igb_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ IGB_NB_XSTATS);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= IGB_NB_XSTATS) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+ }
+}
+
static int
eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
@@ -1863,6 +1935,53 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return IGB_NB_XSTATS;
}
+static int
+eth_igb_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ unsigned int i;
+
+ if (!ids) {
+ struct e1000_hw *hw =
+ E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_hw_stats *hw_stats =
+ E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+ if (n < IGB_NB_XSTATS)
+ return IGB_NB_XSTATS;
+
+ igb_read_stats_registers(hw, hw_stats);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!values)
+ return 0;
+
+ /* Extended stats */
+ for (i = 0; i < IGB_NB_XSTATS; i++)
+ values[i] = *(uint64_t *)(((char *)hw_stats) +
+ rte_igb_stats_strings[i].offset);
+
+ return IGB_NB_XSTATS;
+
+ } else {
+ uint64_t values_copy[IGB_NB_XSTATS];
+
+ eth_igb_xstats_get_by_id(dev, NULL, values_copy,
+ IGB_NB_XSTATS);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= IGB_NB_XSTATS) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+ }
+}
+
static void
igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats)
{
@@ -1976,11 +2095,64 @@ eth_igbvf_stats_reset(struct rte_eth_dev *dev)
offsetof(struct e1000_vf_stats, gprc));
}
+static int
+eth_igb_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size)
+{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct e1000_fw_version fw;
+ int ret;
+
+ e1000_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (!(e1000_get_flash_presence_i210(hw))) {
+ ret = snprintf(fw_version, fw_size,
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
+ default:
+ /* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ if (fw.etrack_id != 0X0000) {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor,
+ fw.etrack_id);
+ } else {
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor,
+ fw.eep_build);
+ }
+ }
+ break;
+ }
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
static void
eth_igb_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2109,6 +2281,7 @@ eth_igbvf_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
dev_info->max_rx_pktlen = 0x3FFF; /* See RLPML register. */
dev_info->max_mac_addrs = hw->mac.rar_entry_count;
@@ -2633,12 +2806,14 @@ eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-eth_igb_interrupt_action(struct rte_eth_dev *dev)
+eth_igb_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
uint32_t tctl, rctl;
struct rte_eth_link link;
int ret;
@@ -2649,7 +2824,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
}
igb_intr_enable(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
if (intr->flags & E1000_FLAG_NEED_LINK_UPDATE) {
intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
@@ -2677,10 +2852,10 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
}
PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
tctl = E1000_READ_REG(hw, E1000_TCTL);
rctl = E1000_READ_REG(hw, E1000_RCTL);
if (link.link_status) {
@@ -2713,13 +2888,12 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
* void
*/
static void
-eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+eth_igb_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_igb_interrupt_get_status(dev);
- eth_igb_interrupt_action(dev);
+ eth_igb_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -2759,7 +2933,7 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
}
static int
-eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
+eth_igbvf_interrupt_action(struct rte_eth_dev *dev, struct rte_intr_handle *intr_handle)
{
struct e1000_interrupt *intr =
E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -2770,19 +2944,18 @@ eth_igbvf_interrupt_action(struct rte_eth_dev *dev)
}
igbvf_intr_enable(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
static void
-eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+eth_igbvf_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
eth_igbvf_interrupt_get_status(dev);
- eth_igbvf_interrupt_action(dev);
+ eth_igbvf_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -2906,7 +3079,7 @@ eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
}
#define E1000_RAH_POOLSEL_SHIFT (18)
-static void
+static int
eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, __rte_unused uint32_t pool)
{
@@ -2917,6 +3090,7 @@ eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
rah = E1000_READ_REG(hw, E1000_RAH(index));
rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + pool));
E1000_WRITE_REG(hw, E1000_RAH(index), rah);
+ return 0;
}
static void
@@ -3055,8 +3229,9 @@ igbvf_dev_start(struct rte_eth_dev *dev)
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct e1000_adapter *adapter =
E1000_DEV_PRIVATE(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int ret;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -3091,7 +3266,7 @@ igbvf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -3110,7 +3285,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
static void
igbvf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -3309,7 +3485,7 @@ eth_igb_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
@@ -3350,7 +3526,7 @@ eth_igb_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_128) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, ETH_RSS_RETA_SIZE_128);
+ "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
return -EINVAL;
}
@@ -3471,7 +3647,7 @@ eth_igb_syn_filter_handle(struct rte_eth_dev *dev,
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -5095,6 +5271,8 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct e1000_hw *hw =
E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask = 1 << queue_id;
uint32_t regval;
@@ -5102,7 +5280,7 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
E1000_WRITE_REG(hw, E1000_EIMS, regval | mask);
E1000_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5166,8 +5344,8 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
uint32_t vec = E1000_MISC_VEC_ID;
uint32_t base = E1000_MISC_VEC_ID;
uint32_t misc_shift = 0;
-
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
/* won't configure msix register if no mapping is done
* between intr vector and event fd
@@ -5238,7 +5416,9 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
E1000_WRITE_FLUSH(hw);
}
-RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
-RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_e1000_igb_vf, "* igb_uio | vfio");
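Beyond the probe/remove rework, the igb_ethdev.c diff adds xstats retrieval by ID and a firmware version query. A hedged application-side sketch; the wrapper signatures follow this DPDK generation's ethdev API and should be treated as assumptions rather than a definitive usage.

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
show_fw_and_first_xstat(uint16_t port)
{
	char fw[64];
	uint64_t id = 0;	/* index of the first extended statistic */
	uint64_t value;

	/* Backed by the new eth_igb_fw_version_get() callback. */
	if (rte_eth_dev_fw_version_get(port, fw, sizeof(fw)) == 0)
		printf("port %u firmware: %s\n", port, fw);

	/* Backed by eth_igb_xstats_get_by_id(). */
	if (rte_eth_xstats_get_by_id(port, &id, &value, 1) == 1)
		printf("xstat[%" PRIu64 "] = %" PRIu64 "\n", id, value);
}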
diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 5845bc22..923c78a1 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -57,7 +57,9 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- return eth_dev->pci_dev->max_vfs;
+ struct rte_pci_device *pci_dev = E1000_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
}
static inline
@@ -330,12 +332,16 @@ igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
*(E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
int rar_entry = hw->mac.rar_entry_count - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+ int rah;
if (is_unicast_ether_addr((struct ether_addr *)new_mac)) {
if (!is_zero_ether_addr((struct ether_addr *)new_mac))
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
sizeof(vfinfo[vf].vf_mac_addresses));
hw->mac.ops.rar_set(hw, new_mac, rar_entry);
+ rah = E1000_READ_REG(hw, E1000_RAH(rar_entry));
+ rah |= (0x1 << (E1000_RAH_POOLSEL_SHIFT + vf));
+ E1000_WRITE_REG(hw, E1000_RAH(rar_entry), rah);
return 0;
}
return -1;
diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index dbd37acc..b3b601b7 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,18 +65,28 @@
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IGB_TX_IEEE1588_TMST 0
+#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IGB_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+ PKT_TX_TCP_SEG | \
+ IGB_TX_IEEE1588_TMST)
+
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
@@ -605,7 +615,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*
* Set the Transmit Descriptor Tail (TDT).
*/
- E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
@@ -616,6 +626,52 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /* Check some limitations for TSO in hardware */
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+ (m->l2_len + m->l3_len + m->l4_len >
+ IGB_TSO_MAX_HDRLEN)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1227,6 +1283,132 @@ eth_igb_tx_queue_release(void *txq)
igb_tx_queue_release(txq);
}
+static int
+igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
+{
+ struct igb_tx_entry *sw_ring;
+ volatile union e1000_adv_tx_desc *txr;
+ uint16_t tx_first; /* First segment analyzed. */
+ uint16_t tx_id; /* Current segment being processed. */
+ uint16_t tx_last; /* Last segment in the current packet. */
+ uint16_t tx_next; /* First segment of the next packet. */
+ int count;
+
+ if (txq != NULL) {
+ count = 0;
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
+
+ /*
+ * tx_tail is the last sent packet on the sw_ring. Goto the end
+ * of that packet (the last segment in the packet chain) and
+ * then the next segment will be the start of the oldest segment
+ * in the sw_ring. This is the first packet that will be
+ * attempted to be freed.
+ */
+
+ /* Get last segment in most recently added packet. */
+ tx_first = sw_ring[txq->tx_tail].last_id;
+
+ /* Get the next segment, which is the oldest segment in ring. */
+ tx_first = sw_ring[tx_first].next_id;
+
+ /* Set the current index to the first. */
+ tx_id = tx_first;
+
+ /*
+ * Loop through each packet. For each packet, verify that an
+ * mbuf exists and that the last segment is free. If so, free
+ * it and move on.
+ */
+ while (1) {
+ tx_last = sw_ring[tx_id].last_id;
+
+ if (sw_ring[tx_last].mbuf) {
+ if (txr[tx_last].wb.status &
+ E1000_TXD_STAT_DD) {
+ /*
+ * Increment the number of packets
+ * freed.
+ */
+ count++;
+
+ /* Get the start of the next packet. */
+ tx_next = sw_ring[tx_last].next_id;
+
+ /*
+ * Loop through all segments in a
+ * packet.
+ */
+ do {
+ rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ sw_ring[tx_id].mbuf = NULL;
+ sw_ring[tx_id].last_id = tx_id;
+
+ /* Move to next segment. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ } while (tx_id != tx_next);
+
+ if (unlikely(count == (int)free_cnt))
+ break;
+ } else
+ /*
+ * mbuf still in use, nothing left to
+ * free.
+ */
+ break;
+ } else {
+ /*
+ * There are multiple reasons to be here:
+ * 1) All the packets on the ring have been
+ * freed - tx_id is equal to tx_first
+ * and some packets have been freed.
+ * - Done, exit
+ * 2) Interface has not sent a ring's worth of
+ * packets yet, so the segment after tail is
+ * still empty. Or a previous call to this
+ * function freed some of the segments but
+ * not all so there is a hole in the list.
+ * Hopefully this is a rare case.
+ * - Walk the list and find the next mbuf. If
+ * there isn't one, then done.
+ */
+ if (likely((tx_id == tx_first) && (count != 0)))
+ break;
+
+ /*
+ * Walk the list and find the next mbuf, if any.
+ */
+ do {
+ /* Move to next segment. */
+ tx_id = sw_ring[tx_id].next_id;
+
+ if (sw_ring[tx_id].mbuf)
+ break;
+
+ } while (tx_id != tx_first);
+
+ /*
+ * Determine why previous loop bailed. If there
+ * is not an mbuf, done.
+ */
+ if (sw_ring[tx_id].mbuf == NULL)
+ break;
+ }
+ }
+ } else
+ count = -ENODEV;
+
+ return count;
+}
+
+int
+eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
+{
+ return igb_tx_done_cleanup(txq, free_cnt);
+}
+
static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
@@ -1364,6 +1546,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
igb_reset_tx_queue(txq, dev);
dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
return 0;
@@ -1512,11 +1695,6 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct igb_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -1549,6 +1727,51 @@ eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
}
+int
+eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct igb_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct igb_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
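igb_tx_done_cleanup() above walks the software ring and frees already-transmitted mbufs on demand, exposed through the new tx_done_cleanup dev_op. A hedged sketch of calling it from an application; per the ethdev documentation, a free_cnt of 0 asks the PMD to free all completed packets.

#include <rte_ethdev.h>

/* Reclaim mbufs for packets the NIC has already transmitted on one queue.
 * Returns the number freed, or a negative error such as -ENOTSUP when the
 * PMD does not implement the hook. */
static int
reclaim_tx_mbufs(uint16_t port, uint16_t queue)
{
	return rte_eth_tx_done_cleanup(port, queue, 0);
}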
diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile
index a0d3358d..bf1f5da0 100644
--- a/drivers/net/ena/Makefile
+++ b/drivers/net/ena/Makefile
@@ -51,11 +51,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_com.c
SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net
-
CFLAGS += $(INCLUDES)
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ena/base/ena_com.c b/drivers/net/ena/base/ena_com.c
index bd6f3c6b..38a05877 100644
--- a/drivers/net/ena/base/ena_com.c
+++ b/drivers/net/ena/base/ena_com.c
@@ -2242,7 +2242,6 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
struct ena_rss *rss = &ena_dev->rss;
- struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
struct ena_admin_set_feat_cmd cmd;
struct ena_admin_set_feat_resp resp;
int ret;
@@ -2269,7 +2268,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ena_trc_err("memory address set failed\n");
return ret;
}
- cmd.control_buffer.length = sizeof(*hash_ctrl);
+ cmd.control_buffer.length =
+ sizeof(struct ena_admin_feature_rss_hash_control);
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
@@ -2278,7 +2278,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
sizeof(resp));
if (unlikely(ret)) {
ena_trc_err("Failed to set hash input. error: %d\n", ret);
- ret = ENA_COM_INVAL;
+ return ENA_COM_INVAL;
}
return 0;
diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h
index 87c3bf13..7eaebf40 100644
--- a/drivers/net/ena/base/ena_plat_dpdk.h
+++ b/drivers/net/ena/base/ena_plat_dpdk.h
@@ -44,6 +44,7 @@
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
+#include <rte_io.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
@@ -224,18 +225,8 @@ typedef uint64_t dma_addr_t;
#define ENA_MEM_ALLOC(dmadev, size) rte_zmalloc(NULL, size, 1)
#define ENA_MEM_FREE(dmadev, ptr) ({ENA_TOUCH(dmadev); rte_free(ptr); })
-static inline void writel(u32 value, volatile void *addr)
-{
- *(volatile u32 *)addr = value;
-}
-
-static inline u32 readl(const volatile void *addr)
-{
- return *(const volatile u32 *)addr;
-}
-
-#define ENA_REG_WRITE32(value, reg) writel((value), (reg))
-#define ENA_REG_READ32(reg) readl((reg))
+#define ENA_REG_WRITE32(value, reg) rte_write32_relaxed((value), (reg))
+#define ENA_REG_READ32(reg) rte_read32_relaxed((reg))
#define ATOMIC32_INC(i32_ptr) rte_atomic32_inc(i32_ptr)
#define ATOMIC32_DEC(i32_ptr) rte_atomic32_dec(i32_ptr)
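
ENA_REG_WRITE32()/ENA_REG_READ32() now map to the relaxed rte_io accessors, which leave memory ordering to the caller. A sketch of the usual pattern, assuming an explicit barrier before a relaxed doorbell write, as the enic changes later in this patch also do (pointer and value are illustrative):

#include <stdint.h>
#include <rte_atomic.h>
#include <rte_io.h>

/* Publish descriptor writes, then ring the doorbell with a relaxed MMIO store. */
static void
ring_doorbell(volatile void *doorbell, uint32_t tail)
{
	rte_wmb();				/* descriptors must be visible before the doorbell */
	rte_write32_relaxed(tail, doorbell);	/* relaxed: ordering already enforced above */
}
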
diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index c1fd7bb3..64fee05d 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -33,12 +33,14 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_tcp.h>
#include <rte_atomic.h>
#include <rte_dev.h>
#include <rte_errno.h>
#include <rte_version.h>
#include <rte_eal_memconfig.h>
+#include <rte_net.h>
#include "ena_ethdev.h"
#include "ena_logs.h"
@@ -168,7 +170,15 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
#define PCI_DEVICE_ID_ENA_VF 0xEC20
#define PCI_DEVICE_ID_ENA_LLQ_VF 0xEC21
-static struct rte_pci_id pci_id_ena_map[] = {
+#define ENA_TX_OFFLOAD_MASK (\
+ PKT_TX_L4_MASK | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define ENA_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
+static const struct rte_pci_id pci_id_ena_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
{ .device_id = 0 },
@@ -179,6 +189,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
static int ena_dev_configure(struct rte_eth_dev *dev);
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
@@ -216,7 +228,7 @@ static int ena_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
-static struct eth_dev_ops ena_dev_ops = {
+static const struct eth_dev_ops ena_dev_ops = {
.dev_configure = ena_dev_configure,
.dev_infos_get = ena_infos_get,
.rx_queue_setup = ena_rx_queue_setup,
@@ -669,7 +681,7 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
ring->rx_buffer_info[ring->next_to_clean & ring_mask];
if (m)
- __rte_mbuf_raw_free(m);
+ rte_mbuf_raw_free(m);
ring->next_to_clean++;
}
@@ -730,7 +742,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
if (rc) {
PMD_INIT_LOG(ERR,
- "failed to restart queue %d type(%d)\n",
+ "failed to restart queue %d type(%d)",
i, ring_type);
return -1;
}
@@ -756,7 +768,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
uint32_t max_frame_len = ena_get_mtu_conf(adapter);
if (max_frame_len > adapter->max_mtu) {
- PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+ PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
return -1;
}
@@ -783,7 +795,7 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
queue_size = rte_align32pow2(queue_size >> 1);
if (queue_size == 0) {
- PMD_INIT_LOG(ERR, "Invalid queue size\n");
+ PMD_INIT_LOG(ERR, "Invalid queue size");
return -EFAULT;
}
@@ -908,7 +920,7 @@ static int ena_start(struct rte_eth_dev *dev)
static int ena_queue_restart(struct ena_ring *ring)
{
- int rc;
+ int rc, bufs_num;
ena_assert_msg(ring->configured == 1,
"Trying to restart unconfigured queue\n");
@@ -919,9 +931,10 @@ static int ena_queue_restart(struct ena_ring *ring)
if (ring->type == ENA_RING_TYPE_TX)
return 0;
- rc = ena_populate_rx_queue(ring, ring->ring_size);
- if ((unsigned int)rc != ring->ring_size) {
- PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+ bufs_num = ring->ring_size - 1;
+ rc = ena_populate_rx_queue(ring, bufs_num);
+ if (rc != bufs_num) {
+ PMD_INIT_LOG(ERR, "Failed to populate rx ring !");
return (-1);
}
@@ -1132,7 +1145,7 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
return 0;
in_use = rxq->next_to_use - rxq->next_to_clean;
- ena_assert_msg(((in_use + count) <= ring_size), "bad ring state");
+ ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
count = RTE_MIN(count,
(uint16_t)(ring_size - (next_to_use & ring_mask)));
@@ -1160,6 +1173,8 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
&ebuf, next_to_use_masked);
if (unlikely(rc)) {
+ rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+ count - i);
RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
break;
}
@@ -1267,16 +1282,17 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ena_dev_ops;
eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
adapter->rte_eth_dev_data = eth_dev->data;
adapter->rte_dev = eth_dev;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
adapter->pdev = pci_dev;
- PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+ PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
pci_dev->addr.domain,
pci_dev->addr.bus,
pci_dev->addr.devid,
@@ -1293,7 +1309,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
else if (adapter->regs)
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
else
- PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+ PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
ENA_REGS_BAR);
ena_dev->reg_bar = adapter->regs;
@@ -1307,7 +1323,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
/* device specific initialization routine */
rc = ena_device_init(ena_dev, &get_feat_ctx);
if (rc) {
- PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+ PMD_INIT_LOG(CRIT, "Failed to init ENA device");
return -1;
}
@@ -1315,7 +1331,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
if (get_feat_ctx.max_queues.max_llq_num == 0) {
PMD_INIT_LOG(ERR,
"Trying to use LLQ but llq_num is 0.\n"
- "Fall back into regular queues.\n");
+ "Fall back into regular queues.");
ena_dev->tx_mem_queue_type =
ENA_ADMIN_PLACEMENT_POLICY_HOST;
adapter->num_queues =
@@ -1343,6 +1359,10 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
/* Set max MTU for this device */
adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
+ /* set device support for TSO */
+ adapter->tso4_supported = get_feat_ctx.offload.tx &
+ ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+
/* Copy MAC address and point DPDK to it */
eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
@@ -1369,7 +1389,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
- PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+ PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
adapter->state);
return -1;
}
@@ -1431,6 +1451,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device");
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->speed_capa =
ETH_LINK_SPEED_1G |
ETH_LINK_SPEED_2_5G |
@@ -1459,7 +1481,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
- if (feat.offload.tx &
+ if (feat.offload.rx_supported &
ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
@@ -1556,15 +1578,86 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
recv_idx++;
}
+ rx_ring->next_to_clean = next_to_clean;
+
+ desc_in_use = desc_in_use - completed + 1;
/* Burst refill to save doorbells, memory barriers, const interval */
if (ring_size - desc_in_use > ENA_RING_DESCS_RATIO(ring_size))
ena_populate_rx_queue(rx_ring, ring_size - desc_in_use);
- rx_ring->next_to_clean = next_to_clean;
-
return recv_idx;
}
+static uint16_t
+eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ struct rte_mbuf *m;
+ struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue);
+ struct ipv4_hdr *ip_hdr;
+ uint64_t ol_flags;
+ uint16_t frag_field;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ if (!(ol_flags & PKT_TX_IPV4))
+ continue;
+
+ /* If no L2 header length was specified, assume it is the
+ * length of the Ethernet header.
+ */
+ if (unlikely(m->l2_len == 0))
+ m->l2_len = sizeof(struct ether_hdr);
+
+ ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ m->l2_len);
+ frag_field = rte_be_to_cpu_16(ip_hdr->fragment_offset);
+
+ if ((frag_field & IPV4_HDR_DF_FLAG) != 0) {
+ m->packet_type |= RTE_PTYPE_L4_NONFRAG;
+
+ /* If the IPv4 header has the DF flag set and TSO support is
+ * disabled, the partial checksum should not be calculated.
+ */
+ if (!tx_ring->adapter->tso4_supported)
+ continue;
+ }
+
+ if ((ol_flags & ENA_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+
+ /* In case we are supposed to TSO and DF is not set (DF=0),
+ * the hardware must be provided with the partial checksum;
+ * otherwise it will take care of the necessary calculations.
+ */
+
+ ret = rte_net_intel_cksum_flags_prepare(m,
+ ol_flags & ~PKT_TX_TCP_SEG);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
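
The prepare callback above is invoked through rte_eth_tx_prepare(). A hedged sketch of the usual prepare-then-burst pattern (not part of this patch; port/queue numbers are illustrative):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Validate offload requests before handing a burst to the PMD on port 0, TX queue 0. */
static uint16_t
send_burst(struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t nb_ok = rte_eth_tx_prepare(0, 0, pkts, n);

	if (nb_ok != n)
		/* pkts[nb_ok] requested an unsupported offload; rte_errno is set. */
		printf("tx_prepare stopped at %u (rte_errno %d)\n", nb_ok, rte_errno);

	return rte_eth_tx_burst(0, 0, pkts, nb_ok);
}
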
static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -1698,16 +1791,25 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return sent_idx;
}
-static struct eth_driver rte_ena_pmd = {
- .pci_drv = {
- .id_table = pci_id_ena_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ena_dev_init,
- .dev_private_size = sizeof(struct ena_adapter),
+static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ena_adapter), eth_ena_dev_init);
+}
+
+static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_ena_pmd = {
+ .id_table = pci_id_ena_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_ena_pci_probe,
+ .remove = eth_ena_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h
index 4c7edbb9..dc3080ff 100644
--- a/drivers/net/ena/ena_ethdev.h
+++ b/drivers/net/ena/ena_ethdev.h
@@ -162,6 +162,7 @@ struct ena_adapter {
u16 num_queues;
u16 max_mtu;
+ u8 tso4_supported;
int id_number;
char name[ENA_NAME_MAX_LEN];
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 3926b795..2c7496dc 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -63,10 +63,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_intr.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rq.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_rss.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += lib/librte_hash
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index a3d2a0fb..d17a35f4 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -60,6 +60,7 @@
#define ENIC_RQ_MAX 16
#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2))
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_MAX_MAC_ADDR 64
#define VLAN_ETH_HLEN 18
@@ -278,8 +279,8 @@ extern void enic_dev_stats_get(struct enic *enic,
struct rte_eth_stats *r_stats);
extern void enic_dev_stats_clear(struct enic *enic);
extern void enic_add_packet_filter(struct enic *enic);
-extern void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-extern void enic_del_mac_address(struct enic *enic);
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
+void enic_del_mac_address(struct enic *enic, int mac_index);
extern unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
struct rte_mbuf *tx_pkt, unsigned short len,
diff --git a/drivers/net/enic/enic_compat.h b/drivers/net/enic/enic_compat.h
index 5dbd983b..fc58bb41 100644
--- a/drivers/net/enic/enic_compat.h
+++ b/drivers/net/enic/enic_compat.h
@@ -41,6 +41,7 @@
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_log.h>
+#include <rte_io.h>
#define ENIC_PAGE_ALIGN 4096UL
#define ENIC_ALIGN ENIC_PAGE_ALIGN
@@ -95,42 +96,52 @@ typedef unsigned long long dma_addr_t;
static inline uint32_t ioread32(volatile void *addr)
{
- return *(volatile uint32_t *)addr;
+ return rte_read32(addr);
}
static inline uint16_t ioread16(volatile void *addr)
{
- return *(volatile uint16_t *)addr;
+ return rte_read16(addr);
}
static inline uint8_t ioread8(volatile void *addr)
{
- return *(volatile uint8_t *)addr;
+ return rte_read8(addr);
}
static inline void iowrite32(uint32_t val, volatile void *addr)
{
- *(volatile uint32_t *)addr = val;
+ rte_write32(val, addr);
+}
+
+static inline void iowrite32_relaxed(uint32_t val, volatile void *addr)
+{
+ rte_write32_relaxed(val, addr);
}
static inline void iowrite16(uint16_t val, volatile void *addr)
{
- *(volatile uint16_t *)addr = val;
+ rte_write16(val, addr);
}
static inline void iowrite8(uint8_t val, volatile void *addr)
{
- *(volatile uint8_t *)addr = val;
+ rte_write8(val, addr);
}
static inline unsigned int readl(volatile void __iomem *addr)
{
- return *(volatile unsigned int *)addr;
+ return rte_read32(addr);
+}
+
+static inline unsigned int readl_relaxed(volatile void __iomem *addr)
+{
+ return rte_read32_relaxed(addr);
}
static inline void writel(unsigned int val, volatile void __iomem *addr)
{
- *(volatile unsigned int *)addr = val;
+ rte_write32(val, addr);
}
#define min_t(type, x, y) ({ \
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 2b154ec2..372bae73 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -38,6 +38,7 @@
#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include "vnic_intr.h"
@@ -272,11 +273,6 @@ static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t cq_idx;
int rq_num;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- dev_err(enic, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
cq = &enic->cq[enic_cq_rq(enic, rq_num)];
cq_idx = cq->to_clean;
@@ -459,12 +455,13 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
+ device_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
device_info->max_rx_queues = enic->conf_rq_count / 2;
device_info->max_tx_queues = enic->conf_wq_count;
device_info->min_rx_bufsize = ENIC_MIN_MTU;
device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
- device_info->max_mac_addrs = 1;
+ device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
device_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
@@ -474,7 +471,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
@@ -535,22 +533,22 @@ static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
enic_add_packet_filter(enic);
}
-static void enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
+static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_set_mac_address(enic, mac_addr->addr_bytes);
+ return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
-static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused uint32_t index)
+static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_del_mac_address(enic);
+ enic_del_mac_address(enic, index);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
@@ -621,7 +619,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
- pdev = eth_dev->pci_dev;
+ pdev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pdev);
enic->pdev = pdev;
addr = &pdev->addr;
@@ -632,16 +630,25 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
return enic_probe(enic);
}
-static struct eth_driver rte_enic_pmd = {
- .pci_drv = {
- .id_table = pci_id_enic_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_enicpmd_dev_init,
- .dev_private_size = sizeof(struct enic),
+static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
+ eth_enicpmd_dev_init);
+}
+
+static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_enic_pmd = {
+ .id_table = pci_id_enic_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_enic_pci_probe,
+ .remove = eth_enic_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 1861a32c..d0262418 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -193,35 +193,28 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
}
-void enic_del_mac_address(struct enic *enic)
+void enic_del_mac_address(struct enic *enic, int mac_index)
{
- if (vnic_dev_del_addr(enic->vdev, enic->mac_addr))
+ struct rte_eth_dev *eth_dev = enic->rte_dev;
+ uint8_t *mac_addr = eth_dev->data->mac_addrs[mac_index].addr_bytes;
+
+ if (vnic_dev_del_addr(enic->vdev, mac_addr))
dev_err(enic, "del mac addr failed\n");
}
-void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
+int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
{
int err;
if (!is_eth_addr_valid(mac_addr)) {
dev_err(enic, "invalid mac address\n");
- return;
- }
-
- err = vnic_dev_del_addr(enic->vdev, enic->mac_addr);
- if (err) {
- dev_err(enic, "del mac addr failed\n");
- return;
+ return -EINVAL;
}
- ether_addr_copy((struct ether_addr *)mac_addr,
- (struct ether_addr *)enic->mac_addr);
-
err = vnic_dev_add_addr(enic->vdev, mac_addr);
- if (err) {
+ if (err)
dev_err(enic, "add mac addr failed\n");
- return;
- }
+ return err;
}
static void
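
With ENIC_MAX_MAC_ADDR slots available, secondary addresses are added and removed through the standard ethdev calls that land in the helpers above. A small illustrative sketch (port number and address are made up):

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Add a locally administered unicast MAC address to port 0, pool 0. */
static int
add_secondary_mac(void)
{
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	return rte_eth_dev_mac_addr_add(0, &addr, 0);
}
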
@@ -429,8 +422,7 @@ int enic_link_update(struct enic *enic)
}
static void
-enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
- void *arg)
+enic_intr_handler(void *arg)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)arg;
struct enic *enic = pmd_priv(dev);
@@ -1311,13 +1303,14 @@ static int enic_dev_init(struct enic *enic)
/* Get the supported filters */
enic_fdir_info(enic);
- eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
+ eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN
+ * ENIC_MAX_MAC_ADDR, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
return -1;
}
ether_addr_copy((struct ether_addr *) enic->mac_addr,
- &eth_dev->data->mac_addrs[0]);
+ eth_dev->data->mac_addrs);
vnic_dev_set_reset_flag(enic->vdev, 0);
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 912ea157..ba0cfd01 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -37,6 +37,9 @@
#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"
+#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
#define RTE_PMD_USE_PREFETCH
@@ -129,6 +132,60 @@ enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
+/* Find the offset to L5. This is needed by the enic TSO implementation.
+ * Return 0 if this is not a TCP packet or the length cannot be determined.
+ */
+static inline uint8_t tso_header_len(struct rte_mbuf *mbuf)
+{
+ struct ether_hdr *eh;
+ struct vlan_hdr *vh;
+ struct ipv4_hdr *ip4;
+ struct ipv6_hdr *ip6;
+ struct tcp_hdr *th;
+ uint8_t hdr_len;
+ uint16_t ether_type;
+
+ /* offset past Ethernet header */
+ eh = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
+ ether_type = eh->ether_type;
+ hdr_len = sizeof(struct ether_hdr);
+ if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
+ vh = rte_pktmbuf_mtod_offset(mbuf, struct vlan_hdr *, hdr_len);
+ ether_type = vh->eth_proto;
+ hdr_len += sizeof(struct vlan_hdr);
+ }
+
+ /* offset past IP header */
+ switch (rte_be_to_cpu_16(ether_type)) {
+ case ETHER_TYPE_IPv4:
+ ip4 = rte_pktmbuf_mtod_offset(mbuf, struct ipv4_hdr *, hdr_len);
+ if (ip4->next_proto_id != IPPROTO_TCP)
+ return 0;
+ hdr_len += (ip4->version_ihl & 0xf) * 4;
+ break;
+ case ETHER_TYPE_IPv6:
+ ip6 = rte_pktmbuf_mtod_offset(mbuf, struct ipv6_hdr *, hdr_len);
+ if (ip6->proto != IPPROTO_TCP)
+ return 0;
+ hdr_len += sizeof(struct ipv6_hdr);
+ break;
+ default:
+ return 0;
+ }
+
+ if ((hdr_len + sizeof(struct tcp_hdr)) > mbuf->pkt_len)
+ return 0;
+
+ /* offset past TCP header */
+ th = rte_pktmbuf_mtod_offset(mbuf, struct tcp_hdr *, hdr_len);
+ hdr_len += (th->data_off >> 4) * 4;
+
+ if (hdr_len > mbuf->pkt_len)
+ return 0;
+
+ return hdr_len;
+}
+
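
tso_header_len() recovers the L2+L3+L4 header length by parsing the frame itself, since the enic TSO path keys off tso_segsz rather than the mbuf length fields. For contrast, a sketch of how an application typically marks an IPv4/TCP mbuf for TSO (illustrative only):

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

/* Request TCP segmentation offload on an already-built IPv4/TCP mbuf. */
static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->l4_len = sizeof(struct tcp_hdr);
	m->tso_segsz = mss;
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
}
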
static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
@@ -203,16 +260,25 @@ enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
}
/* checksum flags */
- if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
- (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
- uint32_t l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
-
- if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_IP_CKSUM_BAD;
- if (l4_flags == RTE_PTYPE_L4_UDP ||
- l4_flags == RTE_PTYPE_L4_TCP) {
- if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
- pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ if (mbuf->packet_type & RTE_PTYPE_L3_IPV4) {
+ if (enic_cq_rx_desc_csum_not_calc(cqrd))
+ pkt_flags |= (PKT_RX_IP_CKSUM_UNKNOWN &
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ else {
+ uint32_t l4_flags;
+ l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;
+
+ if (enic_cq_rx_desc_ipv4_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (l4_flags & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+ if (enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))
+ pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
}
}
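
The rewritten block now reports _GOOD as well as _BAD checksum states. On the receive side an application can branch on the flags roughly like this (sketch only):

#include <rte_mbuf.h>

/* Return non-zero when neither the IP nor the L4 checksum was flagged bad. */
static int
rx_csum_looks_ok(const struct rte_mbuf *m)
{
	return (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) == 0;
}
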
@@ -320,7 +386,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
if (rq->is_sop) {
first_seg = rxmb;
- first_seg->nb_segs = 1;
first_seg->pkt_len = seg_length;
} else {
first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
@@ -329,7 +394,6 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
last_seg->next = rxmb;
}
- rxmb->next = NULL;
rxmb->port = enic->port_id;
rxmb->data_len = seg_length;
@@ -380,10 +444,11 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_mb();
if (data_rq->in_use)
- iowrite32(data_rq->posted_index,
- &data_rq->ctrl->posted_index);
+ iowrite32_relaxed(data_rq->posted_index,
+ &data_rq->ctrl->posted_index);
rte_compiler_barrier();
- iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
+ iowrite32_relaxed(sop_rq->posted_index,
+ &sop_rq->ctrl->posted_index);
}
@@ -406,7 +471,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
pool = ((struct rte_mbuf *)buf->mb)->pool;
for (i = 0; i < nb_to_free; i++) {
buf = &wq->bufs[tail_idx];
- m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
+ m = rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
buf->mb = NULL;
if (unlikely(m == NULL)) {
@@ -466,6 +531,8 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint8_t vlan_tag_insert;
uint8_t eop;
uint64_t bus_addr;
+ uint8_t offload_mode;
+ uint16_t header_len;
enic_cleanup_wq(enic, wq);
wq_desc_avail = vnic_wq_desc_avail(wq);
@@ -504,13 +571,17 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
desc_p = descs + head_idx;
eop = (data_len == pkt_len);
-
- if (ol_flags & ol_flags_mask) {
- if (ol_flags & PKT_TX_VLAN_PKT) {
- vlan_tag_insert = 1;
- vlan_id = tx_pkt->vlan_tci;
+ offload_mode = WQ_ENET_OFFLOAD_MODE_CSUM;
+ header_len = 0;
+
+ if (tx_pkt->tso_segsz) {
+ header_len = tso_header_len(tx_pkt);
+ if (header_len) {
+ offload_mode = WQ_ENET_OFFLOAD_MODE_TSO;
+ mss = tx_pkt->tso_segsz;
}
-
+ }
+ if ((ol_flags & ol_flags_mask) && (header_len == 0)) {
if (ol_flags & PKT_TX_IP_CKSUM)
mss |= ENIC_CALC_IP_CKSUM;
@@ -523,8 +594,14 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
}
- wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
- eop, 0, vlan_tag_insert, vlan_id, 0);
+ if (ol_flags & PKT_TX_VLAN_PKT) {
+ vlan_tag_insert = 1;
+ vlan_id = tx_pkt->vlan_tci;
+ }
+
+ wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
+ offload_mode, eop, eop, 0, vlan_tag_insert,
+ vlan_id, 0);
*desc_p = desc_tmp;
buf = &wq->bufs[head_idx];
@@ -544,8 +621,9 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ tx_pkt->data_off);
wq_enet_desc_enc((struct wq_enet_desc *)
&desc_tmp, bus_addr, data_len,
- mss, 0, 0, eop, eop, 0,
- vlan_tag_insert, vlan_id, 0);
+ mss, 0, offload_mode, eop, eop,
+ 0, vlan_tag_insert, vlan_id,
+ 0);
*desc_p = desc_tmp;
buf = &wq->bufs[head_idx];
@@ -557,7 +635,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
post:
rte_wmb();
- iowrite32(head_idx, &wq->ctrl->posted_index);
+ iowrite32_relaxed(head_idx, &wq->ctrl->posted_index);
done:
wq->ring.desc_avail = wq_desc_avail;
wq->head_idx = head_idx;
diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile
index afcbd1d8..e0024f05 100644
--- a/drivers/net/fm10k/Makefile
+++ b/drivers/net/fm10k/Makefile
@@ -71,6 +71,9 @@ CFLAGS_BASE_DRIVER += -Wno-missing-field-initializers
ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1)
CFLAGS += -Wno-deprecated
CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
@@ -84,6 +87,7 @@ VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
+# base driver is based on the package of cid-fm10k.2017.01.24.tar.gz
#
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_rxtx.c
@@ -96,10 +100,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += fm10k_api.c
SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/fm10k/base/fm10k_common.c b/drivers/net/fm10k/base/fm10k_common.c
index a90d2f0b..29f35d7d 100644
--- a/drivers/net/fm10k/base/fm10k_common.c
+++ b/drivers/net/fm10k/base/fm10k_common.c
@@ -230,6 +230,9 @@ s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
/* clear tx_ready to prevent any false hits for reset */
hw->mac.tx_ready = false;
+ if (FM10K_REMOVED(hw->hw_addr))
+ return FM10K_SUCCESS;
+
/* clear the enable bit for all rings */
for (i = 0; i < q_cnt; i++) {
reg = FM10K_READ_REG(hw, FM10K_TXDCTL(i));
@@ -542,7 +545,7 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* if we somehow dropped the Tx enable we should reset */
- if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
+ if (mac->tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
ret_val = FM10K_ERR_RESET_REQUESTED;
goto out;
}
@@ -558,8 +561,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* interface cannot receive traffic without logical ports */
- if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+ if (mac->ops.request_lport_map)
+ ret_val = mac->ops.request_lport_map(hw);
+
goto out;
+ }
/* if we passed all the tests above then the switch is ready and we no
* longer need to check for link
diff --git a/drivers/net/fm10k/base/fm10k_mbx.c b/drivers/net/fm10k/base/fm10k_mbx.c
index 2e704340..16ab98d3 100644
--- a/drivers/net/fm10k/base/fm10k_mbx.c
+++ b/drivers/net/fm10k/base/fm10k_mbx.c
@@ -2066,9 +2066,10 @@ STATIC void fm10k_sm_mbx_create_reply(struct fm10k_hw *hw,
* function can also be used to respond to an error as the connection
* resetting would also be a means of dealing with errors.
**/
-STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
- struct fm10k_mbx_info *mbx)
+STATIC s32 fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
+ struct fm10k_mbx_info *mbx)
{
+ s32 err = FM10K_SUCCESS;
const enum fm10k_mbx_state state = mbx->state;
switch (state) {
@@ -2081,6 +2082,7 @@ STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
case FM10K_STATE_OPEN:
/* flush any incomplete work */
fm10k_sm_mbx_connect_reset(mbx);
+ err = FM10K_ERR_RESET_REQUESTED;
break;
case FM10K_STATE_CONNECT:
/* Update remote value to match local value */
@@ -2090,6 +2092,8 @@ STATIC void fm10k_sm_mbx_process_reset(struct fm10k_hw *hw,
}
fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail);
+
+ return err;
}
/**
@@ -2172,7 +2176,7 @@ STATIC s32 fm10k_sm_mbx_process(struct fm10k_hw *hw,
switch (FM10K_MSG_HDR_FIELD_GET(mbx->mbx_hdr, SM_VER)) {
case 0:
- fm10k_sm_mbx_process_reset(hw, mbx);
+ err = fm10k_sm_mbx_process_reset(hw, mbx);
break;
case FM10K_SM_MBX_VERSION:
err = fm10k_sm_mbx_process_version_1(hw, mbx);
diff --git a/drivers/net/fm10k/base/fm10k_mbx.h b/drivers/net/fm10k/base/fm10k_mbx.h
index edc57dff..2fac012c 100644
--- a/drivers/net/fm10k/base/fm10k_mbx.h
+++ b/drivers/net/fm10k/base/fm10k_mbx.h
@@ -54,6 +54,8 @@ struct fm10k_mbx_info;
#define FM10K_MBX_ACK_INTERRUPT 0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
#define FM10K_MBICR(_n) ((_n) + 0x18840)
#define FM10K_GMBX 0x18842
diff --git a/drivers/net/fm10k/base/fm10k_osdep.h b/drivers/net/fm10k/base/fm10k_osdep.h
index a21daa2a..199ebd8e 100644
--- a/drivers/net/fm10k/base/fm10k_osdep.h
+++ b/drivers/net/fm10k/base/fm10k_osdep.h
@@ -39,6 +39,8 @@ POSSIBILITY OF SUCH DAMAGE.
#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
+#include <rte_io.h>
+
#include "../fm10k_logs.h"
/* TODO: this does not look like it should be used... */
@@ -88,17 +90,16 @@ typedef int bool;
#endif
/* offsets are WORD offsets, not BYTE offsets */
-#define FM10K_WRITE_REG(hw, reg, val) \
- ((((volatile uint32_t *)(hw)->hw_addr)[(reg)]) = ((uint32_t)(val)))
-#define FM10K_READ_REG(hw, reg) \
- (((volatile uint32_t *)(hw)->hw_addr)[(reg)])
+#define FM10K_WRITE_REG(hw, reg, val) \
+ rte_write32((val), ((hw)->hw_addr + (reg)))
+
+#define FM10K_READ_REG(hw, reg) rte_read32(((hw)->hw_addr + (reg)))
+
#define FM10K_WRITE_FLUSH(a) FM10K_READ_REG(a, FM10K_CTRL)
-#define FM10K_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define FM10K_PCI_REG(reg) rte_read32(reg)
-#define FM10K_PCI_REG_WRITE(reg, value) do { \
- FM10K_PCI_REG((reg)) = (value); \
-} while (0)
+#define FM10K_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
/* not implemented */
#define FM10K_READ_PCI_WORD(hw, reg) 0
@@ -162,23 +163,6 @@ typedef int bool;
#define FM10K_RXD_PKTTYPE_MASK 0x03F0
#define FM10K_RXD_PKTTYPE_SHIFT 4
-enum fm10k_rdesc_pkt_type {
- /* L3 type */
- FM10K_PKTTYPE_OTHER = 0x00,
- FM10K_PKTTYPE_IPV4 = 0x01,
- FM10K_PKTTYPE_IPV4_EX = 0x02,
- FM10K_PKTTYPE_IPV6 = 0x03,
- FM10K_PKTTYPE_IPV6_EX = 0x04,
-
- /* L4 type */
- FM10K_PKTTYPE_TCP = 0x08,
- FM10K_PKTTYPE_UDP = 0x10,
- FM10K_PKTTYPE_GRE = 0x18,
- FM10K_PKTTYPE_VXLAN = 0x20,
- FM10K_PKTTYPE_NVGRE = 0x28,
- FM10K_PKTTYPE_GENEVE = 0x30
-};
-
#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
#define FM10K_RXD_STATUS_HBO 0x0400 /* header buffer overrun */
diff --git a/drivers/net/fm10k/base/fm10k_pf.c b/drivers/net/fm10k/base/fm10k_pf.c
index 105babf4..db5f4912 100644
--- a/drivers/net/fm10k/base/fm10k_pf.c
+++ b/drivers/net/fm10k/base/fm10k_pf.c
@@ -66,21 +66,21 @@ STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* shut down all rings */
err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING) {
+ hw->mac.reset_while_pending++;
+ goto force_reset;
+ } else if (err) {
return err;
+ }
/* Verify that DMA is no longer active */
reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL);
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
- /* verify the switch is ready for reset */
- reg = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
- if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
-
+force_reset:
 /* Initiate data path reset */
- reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
+ reg = FM10K_DMA_CTRL_DATAPATH_RESET;
FM10K_WRITE_REG(hw, FM10K_DMA_CTRL, reg);
/* Flush write and allow 100us for reset to complete */
@@ -90,10 +90,9 @@ STATIC s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* Verify we made it out of reset */
reg = FM10K_READ_REG(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
-out:
- return err;
+ return FM10K_SUCCESS;
}
/**
@@ -255,8 +254,8 @@ STATIC s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
/* VLAN multi-bit write:
* The multi-bit write has several parts to it.
- * 3 2 1 0
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | RSVD0 | Length |C|RSVD0| VLAN ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -412,7 +411,7 @@ STATIC s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
DEBUGFUNC("fm10k_update_uc_addr_pf");
/* verify MAC address is valid */
- if (!FM10K_IS_VALID_ETHER_ADDR(mac))
+ if (!IS_VALID_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
@@ -435,7 +434,7 @@ STATIC s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
DEBUGFUNC("fm10k_update_mc_addr_pf");
/* verify multicast address is valid */
- if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac))
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
@@ -536,6 +535,10 @@ STATIC s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
if (!fm10k_glort_valid_pf(hw, glort))
return FM10K_ERR_PARAM;
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
/* construct the lport message from the 2 pieces of data we have */
lport_msg = ((u32)count << 16) | glort;
@@ -908,13 +911,13 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
qmap_idx = qmap_stride * vf_idx;
- /* MAP Tx queue back to 0 temporarily, and disable it */
- FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
- FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
-
- /* determine correct default VLAN ID */
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace requests.
+ */
if (vf_info->pf_vid)
- vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
else
vf_vid = vf_info->sw_vid;
@@ -923,9 +926,35 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
vf_info->mac, vf_vid);
- /* load onto outgoing mailbox, ignore any errors on enqueue */
- if (vf_info->mbx.ops.enqueue_tx)
- vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ /* Configure Queue control register with new VLAN ID. The TXQCTL
+ * register is RO from the VF, so the PF must do this even in the
+ * case of notifying the VF of a new VID via the mailbox.
+ */
+ txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
+ FM10K_TXQCTL_VID_MASK;
+ txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
+ FM10K_TXQCTL_VF | vf_idx;
+
+ for (i = 0; i < queues_per_pool; i++)
+ FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
+
+ /* try loading a message onto outgoing mailbox first */
+ if (vf_info->mbx.ops.enqueue_tx) {
+ err = vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
+ if (err != FM10K_MBX_ERR_NO_MBX)
+ return err;
+ err = FM10K_SUCCESS;
+ }
+
+ /* If we aren't connected to a mailbox, this is most likely because
+ * the VF driver is not running. It should thus be safe to re-map
+ * queues and use the registers to pass the MAC address so that the VF
+ * driver gets correct information during its initialization.
+ */
+
+ /* MAP Tx queue back to 0 temporarily, and disable it */
+ FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), 0);
+ FM10K_WRITE_REG(hw, FM10K_TXDCTL(vf_q_idx), 0);
/* verify ring has disabled before modifying base address registers */
txdctl = FM10K_READ_REG(hw, FM10K_TXDCTL(vf_q_idx));
@@ -941,7 +970,7 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
}
/* Update base address registers to contain MAC address */
- if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
tdbal = (((u32)vf_info->mac[3]) << 24) |
(((u32)vf_info->mac[4]) << 16) |
(((u32)vf_info->mac[5]) << 8);
@@ -964,16 +993,6 @@ STATIC s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
FM10K_TDLEN_ITR_SCALE_SHIFT);
err_out:
- /* configure Queue control register */
- txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
- FM10K_TXQCTL_VID_MASK;
- txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
- FM10K_TXQCTL_VF | vf_idx;
-
- /* assign VLAN ID */
- for (i = 0; i < queues_per_pool; i++)
- FM10K_WRITE_REG(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);
-
/* restore the queue back to VF ownership */
FM10K_WRITE_REG(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
return err;
@@ -1081,7 +1100,7 @@ STATIC s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
FM10K_WRITE_REG(hw, FM10K_MRQC(vf_info->vsi), 0);
/* Update base address registers to contain MAC address */
- if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac)) {
+ if (IS_VALID_ETHER_ADDR(vf_info->mac)) {
tdbal = (((u32)vf_info->mac[3]) << 24) |
(((u32)vf_info->mac[4]) << 16) |
(((u32)vf_info->mac[5]) << 8);
@@ -1267,18 +1286,32 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
if (err)
return err;
- /* verify upper 16 bits are zero */
- if (vid >> 16)
- return FM10K_ERR_PARAM;
-
set = !(vid & FM10K_VLAN_CLEAR);
vid &= ~FM10K_VLAN_CLEAR;
- err = fm10k_iov_select_vid(vf_info, (u16)vid);
- if (err < 0)
- return err;
+ /* if the length field has been set, this is a multi-bit
+ * update request. For multi-bit requests, simply disallow
+ * them when the pf_vid has been set. In this case, the PF
+ * should have already cleared the VLAN_TABLE, and if we
+ * allowed them, it could allow a rogue VF to receive traffic
+ * on a VLAN it was not assigned. In the single-bit case, we
+ * need to modify requests for VLAN 0 to use the default PF or
+ * SW vid when assigned.
+ */
- vid = err;
+ if (vid >> 16) {
+ /* prevent multi-bit requests when PF has
+ * administratively set the VLAN for this VF
+ */
+ if (vf_info->pf_vid)
+ return FM10K_ERR_PARAM;
+ } else {
+ err = fm10k_iov_select_vid(vf_info, (u16)vid);
+ if (err < 0)
+ return err;
+
+ vid = err;
+ }
/* update VSI info for VF in regards to VLAN table */
err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
@@ -1293,7 +1326,7 @@ s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
return err;
/* block attempts to set MAC for a locked device */
- if (FM10K_IS_VALID_ETHER_ADDR(vf_info->mac) &&
+ if (IS_VALID_ETHER_ADDR(vf_info->mac) &&
memcmp(mac, vf_info->mac, ETH_ALEN))
return FM10K_ERR_PARAM;
@@ -1670,13 +1703,12 @@ STATIC s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
* @hw: pointer to hardware structure
* @switch_ready: pointer to boolean value that will record switch state
*
- * This funciton will check the DMA_CTRL2 register and mailbox in order
+ * This function will check the DMA_CTRL2 register and mailbox in order
* to determine if the switch is ready for the PF to begin requesting
* addresses and mapping traffic to the local interface.
**/
STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
- s32 ret_val = FM10K_SUCCESS;
u32 dma_ctrl2;
DEBUGFUNC("fm10k_get_host_state_pf");
@@ -1684,23 +1716,16 @@ STATIC s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
/* verify the switch is ready for interaction */
dma_ctrl2 = FM10K_READ_REG(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
+ return FM10K_SUCCESS;
/* retrieve generic host state info */
- ret_val = fm10k_get_host_state_generic(hw, switch_ready);
- if (ret_val)
- goto out;
-
- /* interface cannot receive traffic without logical ports */
- if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
- ret_val = fm10k_request_lport_map_pf(hw);
-
-out:
- return ret_val;
+ return fm10k_get_host_state_generic(hw, switch_ready);
}
 /* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
FM10K_TLV_ATTR_LAST
};
@@ -2082,6 +2107,7 @@ s32 fm10k_init_ops_pf(struct fm10k_hw *hw)
mac->ops.set_dma_mask = &fm10k_set_dma_mask_pf;
mac->ops.get_fault = &fm10k_get_fault_pf;
mac->ops.get_host_state = &fm10k_get_host_state_pf;
+ mac->ops.request_lport_map = &fm10k_request_lport_map_pf;
mac->ops.adjust_systime = &fm10k_adjust_systime_pf;
mac->ops.notify_offset = &fm10k_notify_offset_pf;
mac->ops.read_systime = &fm10k_read_systime_pf;
diff --git a/drivers/net/fm10k/base/fm10k_pf.h b/drivers/net/fm10k/base/fm10k_pf.h
index c84b1bc5..ca125c27 100644
--- a/drivers/net/fm10k/base/fm10k_pf.h
+++ b/drivers/net/fm10k/base/fm10k_pf.h
@@ -91,6 +91,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
/* The following data structures are overlayed directly onto TLV mailbox
* messages, and must not break 4 byte alignment. Ensure the structures line
* up correctly as per their TLV definition.
diff --git a/drivers/net/fm10k/base/fm10k_tlv.c b/drivers/net/fm10k/base/fm10k_tlv.c
index e6150c1d..0328ede2 100644
--- a/drivers/net/fm10k/base/fm10k_tlv.c
+++ b/drivers/net/fm10k/base/fm10k_tlv.c
@@ -520,7 +520,8 @@ STATIC s32 fm10k_tlv_attr_validate(u32 *attr,
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- * and 0 on success.
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
**/
static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
@@ -559,14 +560,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
- if (attr_id < FM10K_TLV_RESULTS_MAX)
- err = fm10k_tlv_attr_validate(attr, tlv_attr);
- else
- err = FM10K_NOT_IMPLEMENTED;
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
- if (err < 0)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
return err;
- if (!err)
+ else
results[attr_id] = attr;
/* update offset */
diff --git a/drivers/net/fm10k/base/fm10k_type.h b/drivers/net/fm10k/base/fm10k_type.h
index 3fc8f136..1f38a02c 100644
--- a/drivers/net/fm10k/base/fm10k_type.h
+++ b/drivers/net/fm10k/base/fm10k_type.h
@@ -40,6 +40,7 @@ struct fm10k_hw;
#include "fm10k_osdep.h"
#include "fm10k_mbx.h"
+#define FM10K_INTEL_VENDOR_ID 0x8086
#define FM10K_DEV_ID_PF 0x15A4
#define FM10K_DEV_ID_VF 0x15A5
#ifdef BOULDER_RAPIDS_HW
@@ -125,11 +126,15 @@ struct fm10k_hw;
/* Interrupt control registers */
#define FM10K_EICR 0x0006
+#define FM10K_EICR_PCA_FAULT 0x00000001
+#define FM10K_EICR_THI_FAULT 0x00000004
+#define FM10K_EICR_FUM_FAULT 0x00000020
#define FM10K_EICR_FAULT_MASK 0x0000003F
#define FM10K_EICR_MAILBOX 0x00000040
#define FM10K_EICR_SWITCHREADY 0x00000080
#define FM10K_EICR_SWITCHNOTREADY 0x00000100
#define FM10K_EICR_SWITCHINTERRUPT 0x00000200
+#define FM10K_EICR_SRAMERROR 0x00000400
#define FM10K_EICR_VFLR 0x00000800
#define FM10K_EICR_MAXHOLDTIME 0x00001000
#define FM10K_EIMR 0x0007
@@ -183,6 +188,7 @@ struct fm10k_hw;
#define FM10K_DGLORTDEC_INNERRSS_ENABLE 0x08000000
#define FM10K_TUNNEL_CFG 0x0040
#define FM10K_TUNNEL_CFG_NVGRE_SHIFT 16
+#define FM10K_TUNNEL_CFG_GENEVE 0x0041
#define FM10K_SWPRI_MAP(_n) ((_n) + 0x0050)
#define FM10K_SWPRI_MAX 16
#define FM10K_RSSRK(_n, _m) (((_n) * 0x10) + (_m) + 0x0800)
@@ -211,6 +217,7 @@ struct fm10k_hw;
#define FM10K_DMA_CTRL_RX_ENABLE 0x00000010
#define FM10K_DMA_CTRL_RX_ACTIVE 0x00000080
#define FM10K_DMA_CTRL_RX_DESC_SIZE 0x00000100
+#define FM10K_DMA_CTRL_MINMSS_SHIFT 9
#define FM10K_DMA_CTRL_MINMSS_64 0x00008000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3 0x04800000
#define FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2 0x04000000
@@ -283,6 +290,7 @@ struct fm10k_hw;
#define FM10K_RXDCTL_WRITE_BACK_MIN_DELAY 0x00000001
#define FM10K_RXDCTL_DROP_ON_EMPTY 0x00000200
#define FM10K_RXINT(_n) ((0x40 * (_n)) + 0x4008)
+#define FM10K_RXINT_TIMER_SHIFT 8
#define FM10K_SRRCTL(_n) ((0x40 * (_n)) + 0x4009)
#define FM10K_SRRCTL_BSIZEPKT_SHIFT 8 /* shift _right_ */
#define FM10K_SRRCTL_LOOPBACK_SUPPRESS 0x40000000
@@ -336,6 +344,7 @@ struct fm10k_hw;
#define FM10K_TXQCTL_VID_MASK 0x0FFF0000
#define FM10K_TXQCTL_UNLIMITED_BW 0x10000000
#define FM10K_TXINT(_n) ((0x40 * (_n)) + 0x8008)
+#define FM10K_TXINT_TIMER_SHIFT 8
/* Tx Statistics */
#define FM10K_QPTC(_n) ((0x40 * (_n)) + 0x8009)
@@ -374,6 +383,7 @@ struct fm10k_hw;
/* Switch manager interrupt registers */
#define FM10K_IP 0x13000
#define FM10K_IP_NOTINRESET 0x00000100
+#define FM10K_SRAM_IP 0x13003
/* VLAN registers */
#define FM10K_VLAN_TABLE(_n, _m) ((0x80 * (_n)) + (_m) + 0x14000)
@@ -384,6 +394,7 @@ struct fm10k_hw;
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
@@ -422,20 +433,20 @@ struct fm10k_hw;
#define ETH_ALEN 6
#endif /* ETH_ALEN */
-#ifndef FM10K_IS_ZERO_ETHER_ADDR
+#ifndef IS_ZERO_ETHER_ADDR
/* make certain address is not 0 */
-#define FM10K_IS_ZERO_ETHER_ADDR(addr) \
+#define IS_ZERO_ETHER_ADDR(addr) \
(!((addr)[0] | (addr)[1] | (addr)[2] | (addr)[3] | (addr)[4] | (addr)[5]))
#endif
-#ifndef FM10K_IS_MULTICAST_ETHER_ADDR
-#define FM10K_IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
+#ifndef IS_MULTICAST_ETHER_ADDR
+#define IS_MULTICAST_ETHER_ADDR(addr) ((addr)[0] & 0x1)
#endif
-#ifndef FM10K_IS_VALID_ETHER_ADDR
+#ifndef IS_VALID_ETHER_ADDR
/* make certain address is not multicast or 0 */
-#define FM10K_IS_VALID_ETHER_ADDR(addr) \
-(!FM10K_IS_MULTICAST_ETHER_ADDR(addr) && !FM10K_IS_ZERO_ETHER_ADDR(addr))
+#define IS_VALID_ETHER_ADDR(addr) \
+(!IS_MULTICAST_ETHER_ADDR(addr) && !IS_ZERO_ETHER_ADDR(addr))
#endif
enum fm10k_int_source {
@@ -587,6 +598,7 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ s32 (*request_lport_map)(struct fm10k_hw *);
#ifndef NO_IS_SLOT_APPROPRIATE_CHECK
bool (*is_slot_appropriate)(struct fm10k_hw *);
#endif
@@ -629,6 +641,7 @@ struct fm10k_mac_info {
bool tx_ready;
u32 dglort_map;
u8 itr_scale;
+ u64 reset_while_pending;
};
struct fm10k_swapi_table_info {
@@ -676,6 +689,9 @@ struct fm10k_vf_info {
u8 vf_flags; /* flags indicating what modes
* are supported for the port
*/
+#ifndef NO_FM10K_VF_TRUSTED_MODE
+ bool trusted; /* VF trust mode */
+#endif
};
#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
@@ -812,6 +828,24 @@ enum fm10k_rdesc_rss_type {
/* Reserved 0x9 - 0xF */
};
+#define FM10K_RXD_PKTTYPE_MASK 0x03F0
+#define FM10K_RXD_PKTTYPE_SHIFT 4
+enum fm10k_rdesc_pkt_type {
+ /* L3 type */
+ FM10K_PKTTYPE_OTHER = 0x00,
+ FM10K_PKTTYPE_IPV4 = 0x01,
+ FM10K_PKTTYPE_IPV4_EX = 0x02,
+ FM10K_PKTTYPE_IPV6 = 0x03,
+ FM10K_PKTTYPE_IPV6_EX = 0x04,
+
+ /* L4 type */
+ FM10K_PKTTYPE_TCP = 0x08,
+ FM10K_PKTTYPE_UDP = 0x10,
+ FM10K_PKTTYPE_GRE = 0x18,
+ FM10K_PKTTYPE_VXLAN = 0x20,
+ FM10K_PKTTYPE_NVGRE = 0x28,
+ FM10K_PKTTYPE_GENEVE = 0x30
+};
#define FM10K_RXD_HDR_INFO_XC_MASK 0x0006
enum fm10k_rxdesc_xc {
@@ -823,6 +857,7 @@ enum fm10k_rxdesc_xc {
#define FM10K_RXD_STATUS_DD 0x0001 /* Descriptor done */
#define FM10K_RXD_STATUS_EOP 0x0002 /* End of packet */
+#define FM10K_RXD_STATUS_IPCS 0x0008 /* Indicates IPv4 csum */
#define FM10K_RXD_STATUS_L4CS 0x0010 /* Indicates an L4 csum */
#define FM10K_RXD_STATUS_L4CS2 0x0040 /* Inner header L4 csum */
#define FM10K_RXD_STATUS_L4E2 0x0800 /* Inner header L4 csum err */
diff --git a/drivers/net/fm10k/base/fm10k_vf.c b/drivers/net/fm10k/base/fm10k_vf.c
index efbdbd1e..bd449773 100644
--- a/drivers/net/fm10k/base/fm10k_vf.c
+++ b/drivers/net/fm10k/base/fm10k_vf.c
@@ -49,11 +49,11 @@ STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
/* we need to disable the queues before taking further steps */
err = fm10k_stop_hw_generic(hw);
- if (err)
+ if (err && err != FM10K_ERR_REQUESTS_PENDING)
return err;
/* If permanent address is set then we need to restore it */
- if (FM10K_IS_VALID_ETHER_ADDR(perm_addr)) {
+ if (IS_VALID_ETHER_ADDR(perm_addr)) {
bal = (((u32)perm_addr[3]) << 24) |
(((u32)perm_addr[4]) << 16) |
(((u32)perm_addr[5]) << 8);
@@ -82,7 +82,7 @@ STATIC s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
FM10K_WRITE_REG(hw, FM10K_TDLEN(i), tdlen);
}
- return FM10K_SUCCESS;
+ return err;
}
/**
@@ -100,7 +100,9 @@ STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_stop_hw_vf(hw);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ hw->mac.reset_while_pending++;
+ else if (err)
return err;
 /* Initiate VF reset */
@@ -113,9 +115,9 @@ STATIC s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* Clear reset bit and verify it was cleared */
FM10K_WRITE_REG(hw, FM10K_VFCTRL, 0);
if (FM10K_READ_REG(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
- return err;
+ return FM10K_SUCCESS;
}
/**
@@ -225,7 +227,7 @@ STATIC s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
if (vsi)
return FM10K_ERR_PARAM;
- /* verify upper 4 bits of vid and length are 0 */
+ /* clever trick to verify reserved bits in both vid and length */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
@@ -268,7 +270,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
memcpy(hw->mac.perm_addr, perm_addr, ETH_ALEN);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
- hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
return FM10K_SUCCESS;
}
@@ -339,11 +341,11 @@ STATIC s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
return FM10K_ERR_PARAM;
/* verify MAC address is valid */
- if (!FM10K_IS_VALID_ETHER_ADDR(mac))
+ if (!IS_VALID_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
/* verify we are not locked down on the MAC address */
- if (FM10K_IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
+ if (IS_VALID_ETHER_ADDR(hw->mac.perm_addr) &&
memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
return FM10K_ERR_PARAM;
@@ -385,7 +387,7 @@ STATIC s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
return FM10K_ERR_PARAM;
/* verify multicast address is valid */
- if (!FM10K_IS_MULTICAST_ETHER_ADDR(mac))
+ if (!IS_MULTICAST_ETHER_ADDR(mac))
return FM10K_ERR_PARAM;
/* add bit to notify us if this is a set or clear operation */
diff --git a/drivers/net/fm10k/fm10k.h b/drivers/net/fm10k/fm10k.h
index 05aa1a25..8e1a9506 100644
--- a/drivers/net/fm10k/fm10k.h
+++ b/drivers/net/fm10k/fm10k.h
@@ -69,6 +69,9 @@
#define FM10K_MAX_RX_DESC (FM10K_MAX_RX_RING_SZ / sizeof(union fm10k_rx_desc))
#define FM10K_MAX_TX_DESC (FM10K_MAX_TX_RING_SZ / sizeof(struct fm10k_tx_desc))
+#define FM10K_TX_MAX_SEG UINT8_MAX
+#define FM10K_TX_MAX_MTU_SEG UINT8_MAX
+
/*
* byte aligment for HW RX data buffer
* Datasheet requires RX buffer addresses shall either be 512-byte aligned or
@@ -356,14 +359,17 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
uint16_t fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t fm10k_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
int fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq);
int fm10k_rx_vec_condition_check(struct rte_eth_dev *);
void fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq);
uint16_t fm10k_recv_pkts_vec(void *, struct rte_mbuf **, uint16_t);
uint16_t fm10k_recv_scattered_pkts_vec(void *, struct rte_mbuf **,
uint16_t);
-uint16_t fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
void fm10k_txq_vec_setup(struct fm10k_tx_queue *txq);
int fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq);
diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c
index 7c51d3b5..a742eec1 100644
--- a/drivers/net/fm10k/fm10k_ethdev.c
+++ b/drivers/net/fm10k/fm10k_ethdev.c
@@ -32,6 +32,7 @@
*/
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_string_fns.h>
@@ -59,7 +60,7 @@
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
/* default 1:1 map from queue ID to interrupt vector ID */
-#define Q2V(dev, queue_id) (dev->pci_dev->intr_handle.intr_vec[queue_id])
+#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM 128
@@ -197,9 +198,9 @@ fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
}
uint16_t __attribute__((weak))
-fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
- __rte_unused struct rte_mbuf **tx_pkts,
- __rte_unused uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
{
return 0;
}
@@ -677,7 +678,7 @@ fm10k_dev_tx_init(struct rte_eth_dev *dev)
/* Enable use of FTAG bit in TX descriptor, PFVTCTL
* register is read-only for VF.
*/
- if (fm10k_check_ftag(dev->pci_dev->device.devargs)) {
+ if (fm10k_check_ftag(dev->device->devargs)) {
if (hw->mac.type == fm10k_mac_pf) {
FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
FM10K_PFVTCTL_FTAG_DESC_ENABLE);
@@ -711,7 +712,8 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct fm10k_macvlan_filter_info *macvlan;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i, ret;
struct fm10k_rx_queue *rxq;
uint64_t base_addr;
@@ -725,13 +727,13 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
i = 0;
if (rte_intr_dp_is_en(intr_handle)) {
for (; i < dev->data->nb_rx_queues; i++) {
- FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(dev, i));
+ FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
}
@@ -1171,7 +1173,8 @@ static void
fm10k_dev_stop(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int i;
PMD_INIT_FUNC_TRACE();
@@ -1190,10 +1193,10 @@ fm10k_dev_stop(struct rte_eth_dev *dev)
FM10K_WRITE_REG(hw, FM10K_RXINT(i),
3 << FM10K_RXINT_TIMER_SHIFT);
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
FM10K_ITR_MASK_SET);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, i)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
FM10K_ITR_MASK_SET);
}
}
@@ -1390,16 +1393,18 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
+ dev_info->pci_dev = pdev;
dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
dev_info->max_rx_queues = hw->mac.max_queues;
dev_info->max_tx_queues = hw->mac.max_queues;
dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
dev_info->max_hash_mac_addrs = 0;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pdev->max_vfs;
dev_info->vmdq_pool_base = 0;
dev_info->vmdq_queue_base = 0;
dev_info->max_vmdq_pools = ETH_32_POOLS;
@@ -1450,6 +1455,8 @@ fm10k_dev_infos_get(struct rte_eth_dev *dev,
.nb_max = FM10K_MAX_TX_DESC,
.nb_min = FM10K_MIN_TX_DESC,
.nb_align = FM10K_MULT_TX_DESC,
+ .nb_seg_max = FM10K_TX_MAX_SEG,
+ .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
};
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
@@ -1683,7 +1690,7 @@ static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
}
/* Add a MAC address, and update filters */
-static void
+static int
fm10k_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index,
@@ -1694,6 +1701,7 @@ fm10k_macaddr_add(struct rte_eth_dev *dev,
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
macvlan->mac_vmdq_id[index] = pool;
+ return 0;
}
/* Remove a MAC address, and update filters */
@@ -1797,7 +1805,8 @@ fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
struct fm10k_rx_queue *q;
const struct rte_memzone *mz;
@@ -2336,15 +2345,16 @@ static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Enable ITR */
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pdev->intr_handle);
return 0;
}
@@ -2352,13 +2362,14 @@ static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
/* Disable ITR */
if (hw->mac.type == fm10k_mac_pf)
- FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
FM10K_ITR_MASK_SET);
else
- FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(dev, queue_id)),
+ FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
FM10K_ITR_MASK_SET);
return 0;
}
@@ -2367,7 +2378,8 @@ static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
uint32_t intr_vector, vec;
uint16_t queue_id;
int result = 0;
@@ -2383,7 +2395,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
intr_vector = dev->data->nb_rx_queues;
/* disable interrupt first */
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ rte_intr_disable(intr_handle);
if (hw->mac.type == fm10k_mac_pf)
fm10k_dev_disable_intr_pf(dev);
else
@@ -2418,7 +2430,7 @@ fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
fm10k_dev_enable_intr_pf(dev);
else
fm10k_dev_enable_intr_vf(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
hw->mac.ops.update_int_moderator(hw);
return result;
}
@@ -2532,9 +2544,7 @@ error:
* void
*/
static void
-fm10k_dev_interrupt_handler_pf(
- __rte_unused struct rte_intr_handle *handle,
- void *param)
+fm10k_dev_interrupt_handler_pf(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2584,7 +2594,7 @@ fm10k_dev_interrupt_handler_pf(
FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
/**
@@ -2599,9 +2609,7 @@ fm10k_dev_interrupt_handler_pf(
* void
*/
static void
-fm10k_dev_interrupt_handler_vf(
- __rte_unused struct rte_intr_handle *handle,
- void *param)
+fm10k_dev_interrupt_handler_vf(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -2618,7 +2626,7 @@ fm10k_dev_interrupt_handler_vf(
FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
FM10K_ITR_MASK_CLEAR);
/* Re-enable interrupt from host side */
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
/* Mailbox message handler in VF */
@@ -2732,6 +2740,28 @@ fm10k_check_ftag(struct rte_devargs *devargs)
return 1;
}
+static uint16_t
+fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
+ ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
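A worked example of the new wrapper, with illustrative numbers that are not part of the patch: if txq->rs_thresh is 32 and a caller passes nb_pkts = 100, the loop issues fixed bursts of 32, 32, 32 and 4 packets. If any fixed burst transmits fewer packets than requested (for instance because the descriptor ring is full), the loop stops early and the caller sees the shortfall in the returned count.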
static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
@@ -2740,7 +2770,22 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
int use_sse = 1;
uint16_t tx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ /* primary process has set the ftag flag and txq_flags */
+ txq = dev->data->tx_queues[0];
+ if (fm10k_tx_vec_condition_check(txq)) {
+ dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ } else {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
+ dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
+ }
+ return;
+ }
+
+ if (fm10k_check_ftag(dev->device->devargs))
tx_ftag_en = 1;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
@@ -2758,8 +2803,10 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
fm10k_txq_vec_setup(txq);
}
dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
+ dev->tx_pkt_prepare = NULL;
} else {
dev->tx_pkt_burst = fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = fm10k_prep_pkts;
PMD_INIT_LOG(DEBUG, "Use regular Tx func");
}
}
@@ -2767,11 +2814,12 @@ fm10k_set_tx_function(struct rte_eth_dev *dev)
static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
- struct fm10k_dev_info *dev_info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *dev_info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
uint16_t i, rx_using_sse;
uint16_t rx_ftag_en = 0;
- if (fm10k_check_ftag(dev->pci_dev->device.devargs))
+ if (fm10k_check_ftag(dev->device->devargs))
rx_ftag_en = 1;
/* In order to allow Vector Rx there are a few configuration
@@ -2797,6 +2845,9 @@ fm10k_set_rx_function(struct rte_eth_dev *dev)
else
PMD_INIT_LOG(DEBUG, "Use regular Rx func");
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
@@ -2809,7 +2860,8 @@ static void
fm10k_params_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct fm10k_dev_info *info = FM10K_DEV_PRIVATE_TO_INFO(dev);
+ struct fm10k_dev_info *info =
+ FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
/* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
* there is no way to get link status without reading BAR4. Until this
@@ -2830,6 +2882,8 @@ static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
int diag, i;
struct fm10k_macvlan_filter_info *macvlan;
@@ -2838,23 +2892,31 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &fm10k_eth_dev_ops;
dev->rx_pkt_burst = &fm10k_recv_pkts;
dev->tx_pkt_burst = &fm10k_xmit_pkts;
+ dev->tx_pkt_prepare = &fm10k_prep_pkts;
- /* only initialize in the primary process */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ /*
+ * Primary process does the whole initialization, for secondary
+ * processes, we just select the same Rx and Tx function as primary.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ fm10k_set_rx_function(dev);
+ fm10k_set_tx_function(dev);
return 0;
+ }
- rte_eth_copy_pci_info(dev, dev->pci_dev);
+ rte_eth_copy_pci_info(dev, pdev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
/* Vendor and Device ID need to be set before init of shared code */
memset(hw, 0, sizeof(*hw));
- hw->device_id = dev->pci_dev->id.device_id;
- hw->vendor_id = dev->pci_dev->id.vendor_id;
- hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
- hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+ hw->device_id = pdev->id.device_id;
+ hw->vendor_id = pdev->id.vendor_id;
+ hw->subsystem_device_id = pdev->id.subsystem_device_id;
+ hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
hw->revision_id = 0;
- hw->hw_addr = (void *)dev->pci_dev->mem_resource[0].addr;
+ hw->hw_addr = (void *)pdev->mem_resource[0].addr;
if (hw->hw_addr == NULL) {
PMD_INIT_LOG(ERR, "Bad mem resource."
" Try to blacklist unused devices.");
@@ -2924,20 +2986,20 @@ eth_fm10k_dev_init(struct rte_eth_dev *dev)
/* PF and VF have different interrupt handling mechanisms */
if (hw->mac.type == fm10k_mac_pf) {
/* register callback func to eal lib */
- rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_register(intr_handle,
fm10k_dev_interrupt_handler_pf, (void *)dev);
/* enable MISC interrupt */
fm10k_dev_enable_intr_pf(dev);
} else { /* VF */
- rte_intr_callback_register(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_register(intr_handle,
fm10k_dev_interrupt_handler_vf, (void *)dev);
fm10k_dev_enable_intr_vf(dev);
}
/* Enable intr after callback registered */
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
hw->mac.ops.update_int_moderator(hw);
@@ -3007,7 +3069,8 @@ static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pdev->intr_handle;
PMD_INIT_FUNC_TRACE();
/* only uninitialize in the primary process */
@@ -3022,7 +3085,7 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
dev->tx_pkt_burst = NULL;
/* disable uio/vfio intr */
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* PF and VF have different interrupt handling mechanisms */
if (hw->mac.type == fm10k_mac_pf) {
@@ -3030,13 +3093,13 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
fm10k_dev_disable_intr_pf(dev);
/* unregister callback func to eal lib */
- rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_unregister(intr_handle,
fm10k_dev_interrupt_handler_pf, (void *)dev);
} else {
/* disable interrupt */
fm10k_dev_disable_intr_vf(dev);
- rte_intr_callback_unregister(&(dev->pci_dev->intr_handle),
+ rte_intr_callback_unregister(intr_handle,
fm10k_dev_interrupt_handler_vf, (void *)dev);
}
@@ -3051,6 +3114,18 @@ eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
return 0;
}
+static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
+}
+
+static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
+}
+
/*
* The set of PCI devices this driver supports. This driver will enable both PF
* and SRIOV-VF devices.
@@ -3062,18 +3137,13 @@ static const struct rte_pci_id pci_id_fm10k_map[] = {
{ .vendor_id = 0, /* sentinel */ },
};
-static struct eth_driver rte_pmd_fm10k = {
- .pci_drv = {
- .id_table = pci_id_fm10k_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_fm10k_dev_init,
- .eth_dev_uninit = eth_fm10k_dev_uninit,
- .dev_private_size = sizeof(struct fm10k_adapter),
+static struct rte_pci_driver rte_pmd_fm10k = {
+ .id_table = pci_id_fm10k_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_fm10k_pci_probe,
+ .remove = eth_fm10k_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k.pci_drv);
+RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 32cc7ff9..c9bb04a0 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_net.h>
#include "fm10k.h"
#include "base/fm10k_type.h"
@@ -65,6 +66,15 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
}
#endif
+#define FM10K_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+
/* @note: When this function is changed, make corresponding change to
* fm10k_dev_supported_ptypes_get()
*/
@@ -424,12 +434,12 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
if (unlikely(num == 0))
return;
- m = __rte_pktmbuf_prefree_seg(txep[0]);
+ m = rte_pktmbuf_prefree_seg(txep[0]);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < num; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -445,7 +455,7 @@ static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < num; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (m != NULL)
rte_mempool_put(m->pool, m);
txep[i] = NULL;
@@ -597,3 +607,41 @@ fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
return count;
}
+
+uint16_t
+fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+ (m->tso_segsz < FM10K_TSO_MINMSS)) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
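fm10k_prep_pkts is installed as dev->tx_pkt_prepare elsewhere in this patch, so it runs when an application calls rte_eth_tx_prepare() ahead of the burst; flags outside FM10K_TX_OFFLOAD_MASK, or a TSO segment size below FM10K_TSO_MINMSS, stop the scan at the offending mbuf. A minimal sketch of that calling pattern, assuming port_id, queue_id, pkts and nb_pkts are already set up by the application (the error handling is illustrative only):

	uint16_t nb_prep, nb_sent;

	/* Validate/fix Tx offloads; returns the count of mbufs that passed. */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep < nb_pkts)
		printf("pkt %u rejected, rte_errno=%d\n", nb_prep, rte_errno);

	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);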
diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c
index 27f3e43f..411bc445 100644
--- a/drivers/net/fm10k/fm10k_rxtx_vec.c
+++ b/drivers/net/fm10k/fm10k_rxtx_vec.c
@@ -324,9 +324,6 @@ fm10k_rxq_rearm(struct fm10k_rx_queue *rxq)
/* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
p0 = (uintptr_t)&mb0->rearm_data;
*(uint64_t *)p0 = rxq->mbuf_initializer;
@@ -470,9 +467,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
__m128i descs0[RTE_FM10K_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ __m128i mbp1;
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&mbufp[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -480,11 +481,13 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
+#endif
descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -493,8 +496,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf pointers into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
/* avoid compiler reorder optimization */
rte_compiler_barrier();
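The new RTE_ARCH_X86_64 guards follow from pointer width arithmetic: a 16-byte XMM register holds 16 / 8 = 2 mbuf pointers on 64-bit builds, so the second load/store pair (mbp2) is needed to fill all four rx_pkts slots per loop iteration, whereas on 32-bit builds the first load already carries 16 / 4 = 4 pointers and the second pair is compiled out.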
@@ -754,12 +759,12 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
* next_dd - (rs_thresh-1)
*/
txep = &txq->sw_ring[txq->next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0]);
+ m = rte_pktmbuf_prefree_seg(txep[0]);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -774,7 +779,7 @@ fm10k_tx_free_bufs(struct fm10k_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i]);
+ m = rte_pktmbuf_prefree_seg(txep[i]);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -800,8 +805,8 @@ tx_backlog_entry(struct rte_mbuf **txep,
}
uint16_t
-fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+fm10k_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;
volatile struct fm10k_tx_desc *txdp;
diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile
index 13085fb7..56f210d6 100644
--- a/drivers/net/i40e/Makefile
+++ b/drivers/net/i40e/Makefile
@@ -1,6 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+# Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -38,7 +38,7 @@ LIB = librte_pmd_i40e.a
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS) -DPF_DRIVER -DVF_DRIVER -DINTEGRATED_VF
-CFLAGS += -DX722_SUPPORT -DX722_A0_SUPPORT
+CFLAGS += -DX722_A0_SUPPORT
EXPORT_MAP := rte_pmd_i40e_version.map
@@ -99,23 +99,23 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
+else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
else
SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
endif
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c
SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += rte_pmd_i40e.c
# vector PMD driver needs SSE4.1 support
ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),)
CFLAGS_i40e_rxtx_vec_sse.o += -msse4.1
endif
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_I40E_PMD)-include := rte_pmd_i40e.h
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/i40e/base/README b/drivers/net/i40e/base/README
new file mode 100644
index 00000000..0da9f674
--- /dev/null
+++ b/drivers/net/i40e/base/README
@@ -0,0 +1,59 @@
+..
+ BSD LICENSE
+
+ Copyright(c) 2017 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Intel® I40E driver
+==================
+
+This directory contains the source code of the FreeBSD i40e driver, version
+cid-i40e.2017.03.21.tar.gz, released by the team which develops the
+base drivers for i40e NICs. The base/ directory contains the
+original source package.
+This driver is valid for the products listed below:
+
+* Intel® Ethernet Converged Network Adapters X710
+* Intel® Ethernet Converged Network Adapters XL710
+* Intel® Ethernet Network Adapter XXV710
+* Intel® Ethernet Connection X722 for 10GBASE-T
+* Intel® Ethernet Connection X722 for 10GbE backplane
+* Intel® Ethernet Connection X722 for 10GbE SFP+
+* Intel® Ethernet Connection X722 for 1GbE
+* Intel® Ethernet Controller X710 and XL710 Family
+* Intel® Ethernet Controller XXV710 for 25GbE backplane
+* Intel® Ethernet Controller XXV710 for 25GbE SFP28
+
+Updating the driver
+===================
+
+NOTE: The source code in this directory should not be modified apart from
+the following file(s):
+
+ i40e_osdep.h
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 0d3a83fa..a60292a3 100644
--- a/drivers/net/i40e/base/i40e_adminq.c
+++ b/drivers/net/i40e/base/i40e_adminq.c
@@ -944,8 +944,8 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
*/
if (i40e_asq_done(hw))
break;
- i40e_msec_delay(1);
- total_delay++;
+ i40e_usec_delay(50);
+ total_delay += 50;
} while (total_delay < hw->aq.asq_cmd_timeout);
}
@@ -1077,11 +1077,11 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
desc_idx = ntc;
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
flags = LE16_TO_CPU(desc->flags);
if (flags & I40E_AQ_FLAG_ERR) {
ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)LE16_TO_CPU(desc->retval);
i40e_debug(hw,
I40E_DEBUG_AQ_MESSAGE,
"AQRX: Event received with error 0x%X.\n",
diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h
index 750973c5..182e40b9 100644
--- a/drivers/net/i40e/base/i40e_adminq.h
+++ b/drivers/net/i40e/base/i40e_adminq.h
@@ -158,9 +158,9 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
/* general information */
#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
#ifdef I40E_ESS_SUPPORT
-#define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */
+#define I40E_ASQ_CMD_TIMEOUT_ESS 50000000 /* usecs */
#endif
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
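Taken together with the i40e_adminq.c change above, the arithmetic is: the overall command budget stays at 250 ms, now expressed as 250000 us, but it is consumed in 50 us steps instead of 1 ms steps. Worst case the poll loop spins 250000 / 50 = 5000 times rather than 250, while a command that completes quickly costs at most about 50 us of added latency instead of up to 1 ms.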
diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h
index 4f067720..09f5bf5c 100644
--- a/drivers/net/i40e/base/i40e_adminq_cmd.h
+++ b/drivers/net/i40e/base/i40e_adminq_cmd.h
@@ -139,12 +139,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
-#ifdef X722_SUPPORT
/* Proxy commands */
i40e_aqc_opc_set_proxy_config = 0x0104,
i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-#endif
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -152,12 +150,11 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
-#ifdef X722_SUPPORT
/* WoL commands */
i40e_aqc_opc_set_wol_filter = 0x0120,
i40e_aqc_opc_get_wake_reason = 0x0121,
+ i40e_aqc_opc_clear_all_wol_filters = 0x025E,
-#endif
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -197,10 +194,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+ i40e_aqc_opc_replace_cloud_filters = 0x025F,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -282,12 +284,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
-#ifdef X722_SUPPORT
i40e_aqc_opc_set_rss_key = 0x0B02,
i40e_aqc_opc_set_rss_lut = 0x0B03,
i40e_aqc_opc_get_rss_key = 0x0B04,
i40e_aqc_opc_get_rss_lut = 0x0B05,
-#endif
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -540,7 +540,8 @@ struct i40e_aqc_mac_address_read {
#define I40E_AQC_PORT_ADDR_VALID 0x40
#define I40E_AQC_WOL_ADDR_VALID 0x80
#define I40E_AQC_MC_MAG_EN_VALID 0x100
-#define I40E_AQC_ADDR_VALID_MASK 0x1F0
+#define I40E_AQC_WOL_PRESERVE_STATUS 0x200
+#define I40E_AQC_ADDR_VALID_MASK 0x3F0
u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
@@ -561,6 +562,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
struct i40e_aqc_mac_address_write {
__le16 command_flags;
#define I40E_AQC_MC_MAG_EN 0x0100
+#define I40E_AQC_WOL_PRESERVE_ON_PFR 0x0200
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -584,7 +586,6 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
-#ifdef X722_SUPPORT
/* Set WoL Filter (0x0120) */
struct i40e_aqc_set_wol_filter {
@@ -600,6 +601,7 @@ struct i40e_aqc_set_wol_filter {
__le16 cmd_flags;
#define I40E_AQC_SET_WOL_FILTER 0x8000
#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000
#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
__le16 valid_flags;
@@ -635,7 +637,6 @@ struct i40e_aqc_get_wake_reason_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
-#endif /* X722_SUPPORT */
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -774,6 +775,7 @@ struct i40e_aqc_set_switch_config {
/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
+#define I40E_AQ_SET_SWITCH_CFG_HW_ATR_EVICT 0x0004
__le16 valid_flags;
u8 reserved[12];
};
@@ -940,16 +942,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#endif
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#ifdef X722_SUPPORT
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
-#endif
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1332,7 +1330,9 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+ u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
@@ -1369,6 +1369,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x0010 to 0x0017 is for custom filters */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1403,6 +1404,46 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set. refer to
+ * DCR288
+ */
+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
+ struct i40e_aqc_add_remove_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1414,6 +1455,54 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1449,6 +1538,36 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
+/* Dynamic Device Personalization */
+struct i40e_aqc_write_personalization_profile {
+ u8 flags;
+ u8 reserved[3];
+ __le32 profile_track_id;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile);
+
+struct i40e_aqc_write_ddp_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct i40e_aqc_get_applied_profiles {
+ u8 flags;
+#define I40E_AQC_GET_DDP_GET_CONF 0x1
+#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2
+ u8 rsv[3];
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles);
+
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1781,11 +1900,20 @@ struct i40e_aq_get_phy_abilities_resp {
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
- u8 mod_type_ext;
+ u8 fec_cfg_curr_mod_ext_info;
+#define I40E_AQ_ENABLE_FEC_KR 0x01
+#define I40E_AQ_ENABLE_FEC_RS 0x02
+#define I40E_AQ_REQUEST_FEC_KR 0x04
+#define I40E_AQ_REQUEST_FEC_RS 0x08
+#define I40E_AQ_ENABLE_FEC_AUTO 0x10
+#define I40E_AQ_FEC
+#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0
+#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5
+
u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
@@ -1809,16 +1937,14 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le32 eeer;
u8 low_power_ctrl;
u8 phy_type_ext;
-#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01
-#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02
-#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
-#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
u8 fec_config;
-#define I40E_AQ_SET_FEC_ABILITY_KR (1 << 0)
-#define I40E_AQ_SET_FEC_ABILITY_RS (1 << 1)
-#define I40E_AQ_SET_FEC_REQUEST_KR (1 << 2)
-#define I40E_AQ_SET_FEC_REQUEST_RS (1 << 3)
-#define I40E_AQ_SET_FEC_AUTO (1 << 4)
+#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0)
+#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1)
+#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2)
+#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3)
+#define I40E_AQ_SET_FEC_AUTO BIT(4)
+#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0
+#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT)
u8 reserved;
};
@@ -2416,7 +2542,6 @@ struct i40e_aqc_del_udp_tunnel_completion {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
-#ifdef X722_SUPPORT
struct i40e_aqc_get_set_rss_key {
#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
@@ -2457,7 +2582,6 @@ struct i40e_aqc_get_set_rss_lut {
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif
/* tunnel key structure 0x0B10 */
diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c
index 9a6b3ed6..03e94bc8 100644
--- a/drivers/net/i40e/base/i40e_common.c
+++ b/drivers/net/i40e/base/i40e_common.c
@@ -71,7 +71,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0:
#endif
@@ -83,18 +82,14 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
-#endif
-#ifdef X722_SUPPORT
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_X722_VF:
- case I40E_DEV_ID_X722_VF_HV:
#ifdef X722_A0_SUPPORT
case I40E_DEV_ID_X722_A0_VF:
#endif
hw->mac.type = I40E_MAC_X722_VF;
break;
#endif /* INTEGRATED_VF || VF_DRIVER */
-#endif /* X722_SUPPORT */
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
@@ -114,7 +109,6 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
return status;
}
-#ifndef I40E_NDIS_SUPPORT
/**
* i40e_aq_str - convert AQ err code to a string
* @hw: pointer to the HW structure
@@ -321,7 +315,6 @@ const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err)
return hw->err_str;
}
-#endif /* I40E_NDIS_SUPPORT */
/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
@@ -447,7 +440,6 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
-#ifdef X722_SUPPORT
/**
* i40e_aq_get_set_rss_lut
@@ -606,7 +598,6 @@ enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-#endif /* X722_SUPPORT */
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
@@ -1022,9 +1013,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
-#ifdef X722_SUPPORT
case I40E_MAC_X722:
-#endif
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -1044,11 +1033,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
-#ifdef X722_SUPPORT
if (hw->mac.type == I40E_MAC_X722)
hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
-#endif
status = i40e_init_nvm(hw);
return status;
}
@@ -1126,7 +1113,8 @@ enum i40e_status_code i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac),
+ I40E_NONDMA_TO_NONDMA);
return status;
}
@@ -1149,7 +1137,8 @@ enum i40e_status_code i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
- memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+ i40e_memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1207,7 +1196,8 @@ enum i40e_status_code i40e_get_san_mac_addr(struct i40e_hw *hw,
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
- memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+ i40e_memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac),
+ I40E_NONDMA_TO_NONDMA);
else
status = I40E_ERR_INVALID_MAC_ADDR;
@@ -1288,6 +1278,8 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_1000BASE_LX:
case I40E_PHY_TYPE_40GBASE_SR4:
case I40E_PHY_TYPE_40GBASE_LR4:
+ case I40E_PHY_TYPE_25GBASE_LR:
+ case I40E_PHY_TYPE_25GBASE_SR:
media = I40E_MEDIA_TYPE_FIBER;
break;
case I40E_PHY_TYPE_100BASE_TX:
@@ -1302,6 +1294,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_40GBASE_AOC:
case I40E_PHY_TYPE_10GBASE_AOC:
+ case I40E_PHY_TYPE_25GBASE_CR:
media = I40E_MEDIA_TYPE_DA;
break;
case I40E_PHY_TYPE_1000BASE_KX:
@@ -1309,6 +1302,7 @@ STATIC enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
+ case I40E_PHY_TYPE_25GBASE_KR:
media = I40E_MEDIA_TYPE_BACKPLANE;
break;
case I40E_PHY_TYPE_SGMII:
@@ -1789,10 +1783,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Copy over all the old settings */
config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
config.link_speed = abilities.link_speed;
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status)
@@ -1952,6 +1949,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->link_speed = (enum i40e_aq_link_speed)resp->link_speed;
hw_link_info->link_info = resp->link_info;
hw_link_info->an_info = resp->an_info;
+ hw_link_info->fec_info = resp->config & (I40E_AQ_CONFIG_FEC_KR_ENA |
+ I40E_AQ_CONFIG_FEC_RS_ENA);
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
hw_link_info->max_frame_size = LE16_TO_CPU(resp->max_frame_size);
@@ -1974,12 +1973,13 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
else
hw_link_info->crc_enable = false;
- if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_ENABLE))
+ if (resp->command_flags & CPU_TO_LE16(I40E_AQ_LSE_IS_ENABLED))
hw_link_info->lse_enable = true;
else
hw_link_info->lse_enable = false;
- if ((hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
+ if ((hw->mac.type == I40E_MAC_XL710) &&
+ (hw->aq.fw_maj_ver < 4 || (hw->aq.fw_maj_ver == 4 &&
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
@@ -2344,6 +2344,43 @@ enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+* i40e_aq_set_vsi_full_promiscuous
+* @hw: pointer to the hw struct
+* @seid: VSI number
+* @set: set promiscuous enable/disable
+* @cmd_details: pointer to command details structure or NULL
+**/
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (set)
+ flags = I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_UNICAST |
+ I40E_AQC_SET_VSI_PROMISC_MULTICAST |
+ I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+
+ cmd->seid = CPU_TO_LE16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_mc_promisc_on_vlan
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2412,6 +2449,40 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_bc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set broadcast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_BROADCAST;
+
+ cmd->promiscuous_flags = CPU_TO_LE16(flags);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_BROADCAST);
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->vlan_tag = CPU_TO_LE16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2745,14 +2816,17 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- if (hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) {
+ /* extra checking needed to ensure link info to user is timely */
+ if ((hw->phy.link_info.link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ ((hw->phy.link_info.link_info & I40E_AQ_LINK_UP) ||
+ !(hw->phy.link_info_old.link_info & I40E_AQ_LINK_UP))) {
status = i40e_aq_get_phy_capabilities(hw, false, false,
&abilities, NULL);
if (status)
return status;
- memcpy(hw->phy.link_info.module_type, &abilities.module_type,
- sizeof(hw->phy.link_info.module_type));
+ i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
+ sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
}
return status;
}
@@ -3603,6 +3677,14 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MNG_MODE:
p->management_mode = number;
+ if (major_rev > 1) {
+ p->mng_protocols_over_mctp = logical_id;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: Protocols over MCTP = %d\n",
+ p->mng_protocols_over_mctp);
+ } else {
+ p->mng_protocols_over_mctp = 0;
+ }
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: Management Mode = %d\n",
p->management_mode);
@@ -3822,7 +3904,6 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
p->update_disabled = true;
break;
-#ifdef X722_SUPPORT
case I40E_AQ_CAP_ID_WOL_AND_PROXY:
hw->num_wol_proxy_filters = (u16)number;
hw->wol_proxy_vsi_seid = (u16)logical_id;
@@ -3832,12 +3913,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
else
p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
- p->proxy_support = p->proxy_support;
i40e_debug(hw, I40E_DEBUG_INIT,
"HW Capability: WOL proxy filters = %d\n",
hw->num_wol_proxy_filters);
break;
-#endif
default:
break;
}
@@ -3874,8 +3953,10 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
/* partition id is 1-based, and functions are evenly spread
* across the ports as partitions
*/
- hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
- hw->num_partitions = num_functions / hw->num_ports;
+ if (hw->num_ports != 0) {
+ hw->partition_id = (hw->pf_id / hw->num_ports) + 1;
+ hw->num_partitions = num_functions / hw->num_ports;
+ }
/* additional HW specific goodies that might
* someday be HW version specific
@@ -4360,11 +4441,15 @@ enum i40e_status_code i40e_aq_start_stop_dcbx(struct i40e_hw *hw,
/**
* i40e_aq_add_udp_tunnel
* @hw: pointer to the hw struct
- * @udp_port: the UDP port to add
+ * @udp_port: the UDP port to add in Host byte order
* @header_len: length of the tunneling header length in DWords
* @protocol_index: protocol index type
* @filter_index: pointer to filter index
* @cmd_details: pointer to command details structure or NULL
+ *
+ * Note: Firmware expects the udp_port value to be in Little Endian format,
+ * and this function will call CPU_TO_LE16 to convert from Host byte order to
+ * Little Endian order.
**/
enum i40e_status_code i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
u16 udp_port, u8 protocol_index,
@@ -5548,6 +5633,59 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_add_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
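The Geneve fix-up loop is a plain field reposition: a Geneve VNI is 24 bits wide, and shifting the CPU-order value left by 8 moves it from bits 0-23 into bits 8-31 of the 32-bit tenant_id before it is converted back to little endian, which is apparently where this hardware revision expects the VNI. For example, a VNI of 0xABCDEF is written out as 0xABCDEF00. The same adjustment is repeated in the big-buffer remove path below.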
+/**
* i40e_aq_remove_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
@@ -5560,9 +5698,9 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
*
**/
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count)
+ u16 seid,
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5587,6 +5725,103 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_remove_cloud_filters_big_buffer
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_add_rm_cloud_filt_elem_ext are filled in by the caller of
+ * the function.
+ *
+ **/
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER;
+
+ /* adjust Geneve VNI for HW issue */
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
+ *
+ **/
+enum
+i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+ (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+ enum i40e_status_code status = I40E_SUCCESS;
+ int i = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_replace_cloud_filters);
+
+ desc.datalen = CPU_TO_LE16(32);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->old_filter_type = filters->old_filter_type;
+ cmd->new_filter_type = filters->new_filter_type;
+ cmd->valid_flags = filters->valid_flags;
+ cmd->tr_bit = filters->tr_bit;
+
+ status = i40e_asq_send_command(hw, &desc, cmd_buf,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
+
+ /* for get cloud filters command */
+ for (i = 0; i < 32; i += 4) {
+ cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+ cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+ cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+ cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
+ }
+
+ return status;
+}
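A minimal caller sketch for the new replace command (hypothetical helper; the filter types and flags below are placeholders, not values taken from a firmware programming guide):

static enum i40e_status_code
example_replace_cloud_filter(struct i40e_hw *hw)
{
	struct i40e_aqc_replace_cloud_filters_cmd filter_cmd;
	struct i40e_aqc_replace_cloud_filters_cmd_buf cmd_buf;

	memset(&filter_cmd, 0, sizeof(filter_cmd));
	memset(&cmd_buf, 0, sizeof(cmd_buf));

	filter_cmd.old_filter_type = 0x10;	/* placeholder */
	filter_cmd.new_filter_type = 0x11;	/* placeholder */
	filter_cmd.valid_flags = 0;		/* placeholder */
	filter_cmd.tr_bit = 0;

	/* On success, cmd_buf.filters[] holds the decoded response */
	return i40e_aq_replace_cloud_filters(hw, &filter_cmd, &cmd_buf);
}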
+
+
+/**
* i40e_aq_alternate_write
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
@@ -6007,9 +6242,6 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
- if (bwd_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
-
desc.datalen = CPU_TO_LE16(bwd_size);
status = i40e_asq_send_command(hw, &desc, bw_data, bwd_size, cmd_details);
@@ -6018,7 +6250,92 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
}
/**
- * i40e_read_phy_register
+ * i40e_read_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "PHY: Can't write command to external PHY.\n");
+ } else {
+ command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
+ *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
+ I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_write_phy_register_clause22
+ * @hw: pointer to the HW structure
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes specified PHY register value
+ **/
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status = I40E_ERR_TIMEOUT;
+ u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ u32 command = 0;
+ u16 retry = 1000;
+
+ command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
+ wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
+
+ command = (reg << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
+ (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
+ (I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE22_STCODE_MASK) |
+ (I40E_GLGEN_MSCA_MDICMD_MASK);
+
+ wr32(hw, I40E_GLGEN_MSCA(port_num), command);
+ do {
+ command = rd32(hw, I40E_GLGEN_MSCA(port_num));
+ if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
+ status = I40E_SUCCESS;
+ break;
+ }
+ i40e_usec_delay(10);
+ retry--;
+ } while (retry);
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6027,9 +6344,8 @@ enum i40e_status_code i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 *value)
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6039,8 +6355,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6062,8 +6378,8 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_READ) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_READ_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6093,7 +6409,7 @@ phy_read_end:
}
/**
- * i40e_write_phy_register
+ * i40e_write_phy_register_clause45
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
@@ -6102,9 +6418,8 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr,
- u16 value)
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
enum i40e_status_code status = I40E_ERR_TIMEOUT;
u32 command = 0;
@@ -6114,8 +6429,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_ADDRESS) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
@@ -6139,8 +6454,8 @@ enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
- (I40E_MDIO_OPCODE_WRITE) |
- (I40E_MDIO_STCODE) |
+ (I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK) |
+ (I40E_MDIO_CLAUSE45_STCODE_MASK) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
@@ -6161,6 +6476,78 @@ phy_write_end:
}
/**
+ * i40e_write_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Writes value to specified PHY register
+ **/
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_write_phy_register_clause22(hw,
+ reg, phy_addr, value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_write_phy_register_clause45(hw,
+ page, reg, phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * i40e_read_phy_register
+ * @hw: pointer to the HW structure
+ * @page: registers page number
+ * @reg: register address in the page
+ * @phy_addr: PHY address on MDIO interface
+ * @value: PHY register value
+ *
+ * Reads specified PHY register value
+ **/
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
+{
+ enum i40e_status_code status;
+
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
+ value);
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
+ status = i40e_read_phy_register_clause45(hw, page, reg,
+ phy_addr, value);
+ break;
+ default:
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ }
+
+ return status;
+}
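A brief usage sketch of the two wrappers above (hypothetical helper; register 0x1 is a placeholder address): callers keep a single entry point and the clause 22 or clause 45 path is selected from hw->device_id.

static void example_phy_rmw(struct i40e_hw *hw, u8 phy_addr)
{
	u16 val = 0;

	/* Clause 45 on the 10G/25G BASE-T devices, clause 22 on
	 * I40E_DEV_ID_1G_BASE_T_X722; other devices return
	 * I40E_ERR_UNKNOWN_PHY.
	 */
	if (i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, 0x1,
				   phy_addr, &val) == I40E_SUCCESS)
		i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, 0x1,
					phy_addr, val);
}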
+
+/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
@@ -6202,14 +6589,16 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
@@ -6221,20 +6610,18 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
- status = i40e_read_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- &led_reg);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
- status = i40e_write_phy_register(hw,
- I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr,
- led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
i40e_msec_delay(interval);
@@ -6242,8 +6629,9 @@ enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
}
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
phy_blinking_end:
return status;
@@ -6274,8 +6662,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- temp_addr, phy_addr, &reg_val);
+ status = i40e_read_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ temp_addr, phy_addr,
+ &reg_val);
if (status)
return status;
*val = reg_val;
@@ -6308,41 +6698,42 @@ enum i40e_status_code i40e_led_set_phy(struct i40e_hw *hw, bool on,
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
-
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw,
+ I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr,
+ led_reg);
if (status)
return status;
}
- status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, &led_reg);
+ status = i40e_read_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
- led_addr, phy_addr, led_reg);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
- status = i40e_write_phy_register(hw,
+ status = i40e_write_phy_register_clause45(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
- status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
- phy_addr, led_ctl);
+ status = i40e_write_phy_register_clause45(hw, I40E_PHY_COM_REG_PAGE,
+ led_addr, phy_addr, led_ctl);
return status;
}
#endif /* PF_DRIVER */
@@ -6393,7 +6784,9 @@ u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
int retry = 5;
u32 val = 0;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
@@ -6452,7 +6845,9 @@ void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
bool use_register;
int retry = 5;
- use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
+ use_register = (((hw->aq.api_maj_ver == 1) &&
+ (hw->aq.api_min_ver < 5)) ||
+ (hw->mac.type == I40E_MAC_X722));
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
@@ -6568,7 +6963,6 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
I40E_SUCCESS, NULL, 0, NULL);
}
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
/**
* i40e_aq_set_arp_proxy_config
@@ -6591,10 +6985,13 @@ enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_arp_proxy_data));
status = i40e_asq_send_command(hw, &desc, proxy_config,
sizeof(struct i40e_aqc_arp_proxy_data),
@@ -6625,10 +7022,13 @@ enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_ns_proxy_table_entry);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
desc.params.external.addr_high =
CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
desc.params.external.addr_low =
CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+ desc.datalen = CPU_TO_LE16(sizeof(struct i40e_aqc_ns_proxy_data));
status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
sizeof(struct i40e_aqc_ns_proxy_data),
@@ -6675,9 +7075,11 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
if (set_filter) {
if (!filter)
return I40E_ERR_PARAM;
+
cmd_flags |= I40E_AQC_SET_WOL_FILTER;
- buff_len = sizeof(*filter);
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR;
}
+
if (no_wol_tco)
cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
@@ -6688,6 +7090,12 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
cmd->valid_flags = CPU_TO_LE16(valid_flags);
+ buff_len = sizeof(*filter);
+ desc.datalen = CPU_TO_LE16(buff_len);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
@@ -6724,4 +7132,236 @@ enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
return status;
}
-#endif /* X722_SUPPORT */
+/**
+ * i40e_aq_clear_all_wol_filters
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Clear all Wake On LAN (WoL) filters
+ **/
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_all_wol_filters);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+
+/**
+ * i40e_aq_write_ddp - Write dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @track_id: package tracking id
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_write_personalization_profile *cmd =
+ (struct i40e_aqc_write_personalization_profile *)
+ &desc.params.raw;
+ struct i40e_aqc_write_ddp_resp *resp;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_write_personalization_profile);
+
+ desc.flags |= CPU_TO_LE16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->profile_track_id = CPU_TO_LE32(track_id);
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ if (!status) {
+ resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw;
+ if (error_offset)
+ *error_offset = LE32_TO_CPU(resp->error_offset);
+ if (error_info)
+ *error_info = LE32_TO_CPU(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_ddp_list - Read dynamic device personalization (ddp)
+ * @hw: pointer to the hw struct
+ * @buff: command buffer (size in bytes = buff_size)
+ * @buff_size: buffer size in bytes
+ * @flags: flags copied into the command's flags field
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum
+i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_applied_profiles *cmd =
+ (struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_personalization_profile_list);
+
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = CPU_TO_LE16(buff_size);
+
+ cmd->flags = flags;
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+
+ return status;
+}
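A minimal caller sketch (hypothetical helper; the flags value 0 and the buffer sizing are placeholders, since the response layout is defined by the firmware specification rather than this patch):

static enum i40e_status_code
example_get_applied_profiles(struct i40e_hw *hw)
{
	u8 buff[I40E_AQ_LARGE_BUF];	/* placeholder sizing */

	return i40e_aq_get_ddp_list(hw, buff, (u16)sizeof(buff), 0, NULL);
}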
+
+/**
+ * i40e_find_segment_in_package
+ * @segment_type: the segment type to search for (e.g., SEGMENT_TYPE_I40E)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_hdr)
+{
+ struct i40e_generic_seg_header *segment;
+ u32 i;
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < pkg_hdr->segment_count; i++) {
+ segment =
+ (struct i40e_generic_seg_header *)((u8 *)pkg_hdr +
+ pkg_hdr->segment_offset[i]);
+
+ if (segment->type == segment_type)
+ return segment;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_write_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be downloaded
+ * @track_id: package tracking id
+ *
+ * Handles the download of a complete package.
+ */
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_section_table *sec_tbl;
+ struct i40e_profile_section_header *sec = NULL;
+ u32 dev_cnt;
+ u32 vendor_dev_id;
+ u32 *nvm;
+ u32 section_size = 0;
+ u32 offset = 0, info = 0;
+ u32 i;
+
+ if (!track_id) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Track_id can't be 0.");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == I40E_INTEL_VENDOR_ID)
+ if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ nvm = (u32 *)&profile->device_table[dev_cnt];
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec = (struct i40e_profile_section_header *)((u8 *)profile +
+ sec_tbl->section_offset[i]);
+
+ /* Skip 'AQ', 'note' and 'name' sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write profile */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: offset %d, info %d",
+ offset, info);
+ break;
+ }
+ }
+ return status;
+}
+
+/**
+ * i40e_add_pinfo_to_list
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
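Putting the new helpers together, a hedged end-to-end sketch of applying a DDP package from a caller-supplied buffer (hypothetical helper; the buffer is assumed to start with a struct i40e_package_header, and the info-section buffer is sized for the layout built by i40e_add_pinfo_to_list()):

static enum i40e_status_code
example_apply_ddp_package(struct i40e_hw *hw, void *pkg_buf, u32 track_id)
{
	u8 pinfo_sec[sizeof(struct i40e_profile_section_header) +
		     sizeof(struct i40e_profile_info)];
	struct i40e_package_header *pkg_hdr = pkg_buf;
	struct i40e_generic_seg_header *seg;
	struct i40e_profile_segment *profile;
	enum i40e_status_code status;

	seg = i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
	if (!seg)
		return I40E_ERR_CONFIG;
	profile = (struct i40e_profile_segment *)seg;

	status = i40e_write_profile(hw, profile, track_id);
	if (status)
		return status;

	/* Record the profile so later queries report it as applied */
	return i40e_add_pinfo_to_list(hw, profile, pinfo_sec, track_id);
}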
diff --git a/drivers/net/i40e/base/i40e_dcb.c b/drivers/net/i40e/base/i40e_dcb.c
index 26c344fd..9b5405db 100644
--- a/drivers/net/i40e/base/i40e_dcb.c
+++ b/drivers/net/i40e/base/i40e_dcb.c
@@ -396,6 +396,8 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
dcbcfg->numapps = length / sizeof(*app);
if (!dcbcfg->numapps)
return;
+ if (dcbcfg->numapps > I40E_DCBX_MAX_APPS)
+ dcbcfg->numapps = I40E_DCBX_MAX_APPS;
for (i = 0; i < dcbcfg->numapps; i++) {
u8 up, selector;
diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h
index 8bd5793d..4546689a 100644
--- a/drivers/net/i40e/base/i40e_devids.h
+++ b/drivers/net/i40e/base/i40e_devids.h
@@ -55,7 +55,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
#endif /* VF_DRIVER */
-#ifdef X722_SUPPORT
#ifdef X722_A0_SUPPORT
#define I40E_DEV_ID_X722_A0 0x374C
#if defined(INTEGRATED_VF) || defined(VF_DRIVER)
@@ -70,9 +69,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_DEV_ID_SFP_I_X722 0x37D3
#if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT)
#define I40E_DEV_ID_X722_VF 0x37CD
-#define I40E_DEV_ID_X722_VF_HV 0x37D9
#endif /* VF_DRIVER */
-#endif /* X722_SUPPORT */
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/i40e/base/i40e_lan_hmc.c b/drivers/net/i40e/base/i40e_lan_hmc.c
index 22606484..f03f3813 100644
--- a/drivers/net/i40e/base/i40e_lan_hmc.c
+++ b/drivers/net/i40e/base/i40e_lan_hmc.c
@@ -1239,11 +1239,6 @@ enum i40e_status_code i40e_hmc_get_object_va(struct i40e_hw *hw,
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
- if (NULL == hmc_info) {
- ret_code = I40E_ERR_BAD_PTR;
- DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info ptr\n");
- goto exit;
- }
if (NULL == hmc_info->hmc_obj) {
ret_code = I40E_ERR_BAD_PTR;
DEBUGOUT("i40e_hmc_get_object_va: bad hmc_info->hmc_obj ptr\n");
diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c
index 4fa1220b..e8965024 100644
--- a/drivers/net/i40e/base/i40e_nvm.c
+++ b/drivers/net/i40e/base/i40e_nvm.c
@@ -219,19 +219,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
- if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
- if (!ret_code) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- i40e_release_nvm(hw);
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
}
- } else {
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ i40e_release_nvm(hw);
}
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -249,14 +245,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_word_aq(hw, offset, data);
else
ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
-#endif
return ret_code;
}
@@ -348,14 +340,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
else
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -375,7 +363,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
-#ifdef X722_SUPPORT
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (!ret_code) {
@@ -386,9 +373,6 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
-#else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
-#endif
return ret_code;
}
@@ -901,9 +885,20 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
}
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+
return I40E_SUCCESS;
}
+ /* Clear the status even if it has not been read, and log it */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT:
status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
@@ -1253,6 +1248,7 @@ retry:
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
if (opcode == hw->nvm_wait_opcode) {
+
i40e_debug(hw, I40E_DEBUG_NVM,
"NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
if (hw->nvm_release_on_done) {
@@ -1261,6 +1257,11 @@ void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
}
hw->nvm_wait_opcode = 0;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
+
switch (hw->nvmupd_state) {
case I40E_NVMUPD_STATE_INIT_WAIT:
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
@@ -1423,7 +1424,8 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
if (hw->nvm_buff.va) {
buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
+ I40E_NONDMA_TO_NONDMA);
}
}
@@ -1496,7 +1498,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
__func__, cmd->offset, cmd->offset + len);
buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
+ i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
bytes += len;
remainder -= len;
@@ -1510,7 +1512,7 @@ STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
__func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
+ i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
}
return I40E_SUCCESS;
diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h
index 38e7ba5b..c57ecded 100644
--- a/drivers/net/i40e/base/i40e_osdep.h
+++ b/drivers/net/i40e/base/i40e_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_log.h>
+#include <rte_io.h>
#include "../i40e_logs.h"
@@ -153,15 +154,18 @@ do { \
* I40E_PRTQF_FD_MSK
*/
-#define I40E_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define I40E_PCI_REG(reg) rte_read32(reg)
#define I40E_PCI_REG_ADDR(a, reg) \
((volatile uint32_t *)((char *)(a)->hw_addr + (reg)))
static inline uint32_t i40e_read_addr(volatile void *addr)
{
return rte_le_to_cpu_32(I40E_PCI_REG(addr));
}
-#define I40E_PCI_REG_WRITE(reg, value) \
- do { I40E_PCI_REG((reg)) = rte_cpu_to_le_32(value); } while (0)
+
+#define I40E_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+#define I40E_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define I40E_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_GLGEN_STAT)
#define I40EVF_WRITE_FLUSH(a) I40E_READ_REG(a, I40E_VFGEN_RSTAT)
diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h
index 3aab5ca9..4bd589e7 100644
--- a/drivers/net/i40e/base/i40e_prototype.h
+++ b/drivers/net/i40e/base/i40e_prototype.h
@@ -78,7 +78,6 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
bool pf_lut, u8 *lut, u16 lut_size);
@@ -90,11 +89,8 @@ enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_get_set_rss_key_data *key);
-#endif
-#ifndef I40E_NDIS_SUPPORT
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
-#endif /* I40E_NDIS_SUPPORT */
#ifdef PF_DRIVER
@@ -172,12 +168,18 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_full_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
u16 seid, bool enable, u16 vid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -402,11 +404,21 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
-
+enum i40e_status_code i40e_aq_add_cloud_filters_big_buffer(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
+enum i40e_status_code i40e_aq_remove_cloud_filters_big_buffer(
+ struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *filters,
+ u8 filter_count);
+enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -521,7 +533,6 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-#ifdef X722_SUPPORT
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
struct i40e_asq_cmd_details *cmd_details);
@@ -537,12 +548,38 @@ enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
u16 *wake_reason,
struct i40e_asq_cmd_details *cmd_details);
-#endif
-enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 *value);
-enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
- u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_aq_clear_all_wol_filters(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
+enum i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details);
+struct i40e_generic_seg_header *
+i40e_find_segment_in_package(u32 segment_type,
+ struct i40e_package_header *pkg_header);
+enum i40e_status_code
+i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
+i40e_add_pinfo_to_list(struct i40e_hw *hw,
+ struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/i40e/base/i40e_register.h b/drivers/net/i40e/base/i40e_register.h
index fd0a7230..3a305b67 100644
--- a/drivers/net/i40e/base/i40e_register.h
+++ b/drivers/net/i40e/base/i40e_register.h
@@ -3401,7 +3401,6 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
-#ifdef X722_SUPPORT
#ifdef PF_DRIVER
#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
@@ -5366,5 +5365,4 @@ POSSIBILITY OF SUCH DAMAGE.
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#endif /* X722_SUPPORT */
#endif /* _I40E_REGISTER_H_ */
diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h
index b5f72c32..84d57576 100644
--- a/drivers/net/i40e/base/i40e_type.h
+++ b/drivers/net/i40e/base/i40e_type.h
@@ -133,6 +133,7 @@ enum i40e_debug_mask {
I40E_DEBUG_DCB = 0x00000400,
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
@@ -157,15 +158,22 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_INC_ADDR_MASK I40E_MASK(2, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
@@ -189,9 +197,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-#ifdef X722_SUPPORT
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#endif
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -205,13 +211,10 @@ enum i40e_memcpy_type {
*/
enum i40e_mac_type {
I40E_MAC_UNKNOWN = 0,
- I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
-#ifdef X722_SUPPORT
I40E_MAC_X722,
I40E_MAC_X722_VF,
-#endif
I40E_MAC_GENERIC,
};
@@ -266,6 +269,7 @@ struct i40e_link_status {
enum i40e_aq_link_speed link_speed;
u8 link_info;
u8 an_info;
+ u8 fec_info;
u8 ext_info;
u8 loopback;
/* is Link Status Event notification to SW enabled */
@@ -332,25 +336,35 @@ struct i40e_phy_info {
#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
-#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
-#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
+/*
+ * Defining the macro I40E_PHY_TYPE_OFFSET to implement a bit shift for some
+ * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit
+ * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So,
+ * a shift is needed to adjust for this with values larger than 31. The
+ * only affected values are I40E_PHY_TYPE_25GBASE_*.
+ */
+#define I40E_PHY_TYPE_OFFSET 1
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \
+ I40E_PHY_TYPE_OFFSET)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \
+ I40E_PHY_TYPE_OFFSET)
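As a worked example of the offset (assuming I40E_PHY_TYPE_25GBASE_KR is 0x1F in the admin queue PHY type enumeration): BIT_ULL(0x1F + I40E_PHY_TYPE_OFFSET) sets bit 32, so the four 25G capability bits land on bits 32-35 and the unused bit 31 is skipped, matching the previous I40E_AQ_PHY_TYPE_EXT_25G_* + 32 encoding.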
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
-#ifdef X722_SUPPORT
enum i40e_acpi_programming_method {
I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
};
-#define I40E_WOL_SUPPORT_MASK 1
-#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
-#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+#define I40E_WOL_SUPPORT_MASK 0x1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK 0x2
+#define I40E_PROXY_SUPPORT_MASK 0x4
-#endif
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -359,6 +373,10 @@ struct i40e_hw_capabilities {
#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3
u32 management_mode;
+ u32 mng_protocols_over_mctp;
+#define I40E_MNG_PROTOCOL_PLDM 0x2
+#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4
+#define I40E_MNG_PROTOCOL_NCSI 0x8
u32 npar_enable;
u32 os2bmc;
u32 valid_functions;
@@ -414,11 +432,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
-#ifdef X722_SUPPORT
bool apm_wol_support;
enum i40e_acpi_programming_method acpi_prog_method;
bool proxy_support;
-#endif
};
struct i40e_mac_info {
@@ -476,6 +492,7 @@ enum i40e_nvmupd_state {
I40E_NVMUPD_STATE_WRITING,
I40E_NVMUPD_STATE_INIT_WAIT,
I40E_NVMUPD_STATE_WRITE_WAIT,
+ I40E_NVMUPD_STATE_ERROR
};
/* nvm_access definition and its masks/shifts need to be accessible to
@@ -554,6 +571,7 @@ struct i40e_bus_info {
u16 func;
u16 device;
u16 lan_id;
+ u16 bus_id;
};
/* Flow control (FC) parameters */
@@ -678,30 +696,22 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
-#ifdef X722_SUPPORT
/* WoL and proxy support */
u16 num_wol_proxy_filters;
u16 wol_proxy_vsi_seid;
-#endif
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
/* debug mask */
u32 debug_mask;
-#ifndef I40E_NDIS_SUPPORT
char err_str[16];
-#endif /* I40E_NDIS_SUPPORT */
};
STATIC INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
-#ifdef X722_SUPPORT
return (hw->mac.type == I40E_MAC_VF ||
hw->mac.type == I40E_MAC_X722_VF);
-#else
- return hw->mac.type == I40E_MAC_VF;
-#endif
}
struct i40e_driver_version {
@@ -805,11 +815,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
-#else
- I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
-#endif
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -817,11 +823,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
-#ifdef X722_SUPPORT
I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
-#else
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
-#endif
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
@@ -1199,10 +1201,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
-#endif
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1239,38 +1239,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
-#ifdef X722_SUPPORT
/* Note: Values 0-28 are reserved for future use.
* Value 29, 30, 32 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
-#else
- /* Note: Values 0-30 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
-#else
- /* Note: Value 32 is reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
-#ifdef X722_SUPPORT
/* Note: Values 37-38 are reserved for future use.
* Value 39, 40, 42 are not supported on XL710 and X710.
*/
I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
-#else
- /* Note: Values 37-40 are reserved for future use */
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
-#ifdef X722_SUPPORT
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
-#endif
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1325,12 +1311,10 @@ enum i40e_filter_program_desc_pcmd {
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
-#ifdef X722_SUPPORT
#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
I40E_TXD_FLTR_QW1_CMD_SHIFT)
#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
-#endif
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
@@ -1502,6 +1486,7 @@ struct i40e_hw_port_stats {
#define I40E_SR_EMPR_REGS_AUTO_LOAD_PTR 0x3A
#define I40E_SR_GLOBR_REGS_AUTO_LOAD_PTR 0x3B
#define I40E_SR_CORER_REGS_AUTO_LOAD_PTR 0x3C
+#define I40E_SR_PHY_ACTIVITY_LIST_PTR 0x3D
#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
#define I40E_SR_SW_CHECKSUM_WORD 0x3F
#define I40E_SR_1ST_FREE_PROVISION_AREA_PTR 0x40
@@ -1894,4 +1879,83 @@ struct i40e_lldp_variables {
#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
#define I40E_FLEX_57_SHIFT 6
#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
+
+/* Version format for Dynamic Device Personalization (DDP) */
+struct i40e_ddp_version {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define I40E_DDP_NAME_SIZE 32
+
+/* Package header */
+struct i40e_package_header {
+ struct i40e_ddp_version version;
+ u32 segment_count;
+ u32 segment_offset[1];
+};
+
+/* Generic segment header */
+struct i40e_generic_seg_header {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_NOTES 0x00000002
+#define SEGMENT_TYPE_I40E 0x00000011
+#define SEGMENT_TYPE_X722 0x00000012
+ u32 type;
+ struct i40e_ddp_version version;
+ u32 size;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_metadata_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ u32 track_id;
+ char name[I40E_DDP_NAME_SIZE];
+};
+
+struct i40e_device_id_entry {
+ u32 vendor_dev_id;
+ u32 sub_vendor_dev_id;
+};
+
+struct i40e_profile_segment {
+ struct i40e_generic_seg_header header;
+ struct i40e_ddp_version version;
+ char name[I40E_DDP_NAME_SIZE];
+ u32 device_table_count;
+ struct i40e_device_id_entry device_table[1];
+};
+
+struct i40e_section_table {
+ u32 section_count;
+ u32 section_offset[1];
+};
+
+struct i40e_profile_section_header {
+ u16 tbl_size;
+ u16 data_end;
+ struct {
+#define SECTION_TYPE_INFO 0x00000010
+#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_NOTE 0x80000000
+#define SECTION_TYPE_NAME 0x80000001
+ u32 type;
+ u32 offset;
+ u32 size;
+ } section;
+};
+
+struct i40e_profile_info {
+ u32 track_id;
+ struct i40e_ddp_version version;
+ u8 op;
+#define I40E_DDP_ADD_TRACKID 0x01
+#define I40E_DDP_REMOVE_TRACKID 0x02
+ u8 reserved[7];
+ u8 name[I40E_DDP_NAME_SIZE];
+};
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h
index fd51ec32..7a24c0f1 100644
--- a/drivers/net/i40e/base/i40e_virtchnl.h
+++ b/drivers/net/i40e/base/i40e_virtchnl.h
@@ -170,6 +170,13 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP 0X00100000
+#define I40E_VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM 0X00200000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RX_ENCAP_CSUM 0X00400000
+
+#define I40E_VF_BASE_MODE_OFFLOADS (I40E_VIRTCHNL_VF_OFFLOAD_L2 | \
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
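A small illustrative capability check built on the new mask (hypothetical helper; the vf_offload_flags field name is assumed from the i40e_virtchnl_vf_resource structure below):

static inline bool
example_vf_has_base_mode(struct i40e_virtchnl_vf_resource *res)
{
	/* all base-mode offloads (L2, VLAN, RSS_PF) must be advertised */
	return (res->vf_offload_flags & I40E_VF_BASE_MODE_OFFLOADS) ==
	       I40E_VF_BASE_MODE_OFFLOADS;
}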
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index bf7e5a05..4c49673f 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -40,10 +40,12 @@
#include <inttypes.h>
#include <assert.h>
+#include <rte_eal.h>
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
@@ -51,6 +53,7 @@
#include <rte_dev.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
+#include <rte_hash_crc.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -138,60 +141,6 @@
#define I40E_DEFAULT_DCB_APP_NUM 1
#define I40E_DEFAULT_DCB_APP_PRIO 3
-#define I40E_INSET_NONE 0x00000000000000000ULL
-
-/* bit0 ~ bit 7 */
-#define I40E_INSET_DMAC 0x0000000000000001ULL
-#define I40E_INSET_SMAC 0x0000000000000002ULL
-#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
-#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
-#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
-
-/* bit 8 ~ bit 15 */
-#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
-#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
-#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
-#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
-#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
-#define I40E_INSET_DST_PORT 0x0000000000002000ULL
-#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
-
-/* bit 16 ~ bit 31 */
-#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
-#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
-#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
-#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
-#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
-#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
-#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
-#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
-
-/* bit 32 ~ bit 47, tunnel fields */
-#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
-#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
-#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
-#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
-#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
-#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
-
-/* bit 48 ~ bit 55 */
-#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
-
-/* bit 56 ~ bit 63, Flex Payload */
-#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
-#define I40E_INSET_FLEX_PAYLOAD \
- (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
- I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
- I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
- I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
-
/**
* Below are values for writing un-exposed registers suggested
* by silicon experts
@@ -284,11 +233,6 @@
#define I40E_INSET_IPV6_HOP_LIMIT_MASK 0x000CFF00UL
#define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL
-#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
-#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
- I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
-
/* PCI offset for querying capability */
#define PCI_DEV_CAP_REG 0xA4
/* PCI offset for enabling/disabling Extended Tag */
@@ -324,6 +268,8 @@ static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int i40e_fw_version_get(struct rte_eth_dev *dev,
+ char *fw_version, size_t fw_size);
static void i40e_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int i40e_vlan_filter_set(struct rte_eth_dev *dev,
@@ -345,10 +291,10 @@ static int i40e_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_fc_conf *fc_conf);
static int i40e_priority_flow_ctrl_set(struct rte_eth_dev *dev,
struct rte_eth_pfc_conf *pfc_conf);
-static void i40e_macaddr_add(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index,
- uint32_t pool);
+static int i40e_macaddr_add(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index,
+ uint32_t pool);
static void i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index);
static int i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -373,8 +319,7 @@ static void i40e_stat_update_48(struct i40e_hw *hw,
uint64_t *offset,
uint64_t *stat);
static void i40e_pf_config_irq0(struct i40e_hw *hw, bool no_queue);
-static void i40e_dev_interrupt_handler(
- __rte_unused struct rte_intr_handle *handle, void *param);
+static void i40e_dev_interrupt_handler(void *param);
static int i40e_res_pool_init(struct i40e_res_pool_info *pool,
uint32_t base, uint32_t num);
static void i40e_res_pool_destroy(struct i40e_res_pool_info *pool);
@@ -388,10 +333,6 @@ static struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf,
struct i40e_vsi *vsi);
static int i40e_pf_config_mq_rx(struct i40e_pf *pf);
static int i40e_vsi_config_double_vlan(struct i40e_vsi *vsi, int on);
-static inline int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
- struct i40e_macvlan_filter *mv_f,
- int num,
- struct ether_addr *addr);
static inline int i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num,
@@ -406,9 +347,6 @@ static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static void i40e_filter_input_set_init(struct i40e_pf *pf);
-static int i40e_ethertype_filter_set(struct i40e_pf *pf,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -461,6 +399,27 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev,
static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static int i40e_ethertype_filter_convert(
+ const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter);
+static int i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+
+static int i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter);
+static int i40e_cloud_filter_qinq_create(struct i40e_pf *pf);
+
+static void i40e_ethertype_filter_restore(struct i40e_pf *pf);
+static void i40e_tunnel_filter_restore(struct i40e_pf *pf);
+static void i40e_filter_restore(struct i40e_pf *pf);
+static void i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev);
+
+int i40e_logtype_init;
+int i40e_logtype_driver;
+
static const struct rte_pci_id pci_id_i40e_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QEMU) },
@@ -503,6 +462,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.stats_reset = i40e_dev_stats_reset,
.xstats_reset = i40e_dev_stats_reset,
.queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set,
+ .fw_version_get = i40e_fw_version_get,
.dev_infos_get = i40e_dev_info_get,
.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
.vlan_filter_set = i40e_vlan_filter_set,
@@ -520,6 +480,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.rx_queue_release = i40e_dev_rx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.dev_led_on = i40e_dev_led_on,
@@ -668,17 +630,23 @@ static const struct rte_i40e_xstats_name_off rte_i40e_txq_prio_strings[] = {
#define I40E_NB_TXQ_PRIO_XSTATS (sizeof(rte_i40e_txq_prio_strings) / \
sizeof(rte_i40e_txq_prio_strings[0]))
-static struct eth_driver rte_i40e_pmd = {
- .pci_drv = {
- .id_table = pci_id_i40e_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_i40e_dev_init,
- .eth_dev_uninit = eth_i40e_dev_uninit,
- .dev_private_size = sizeof(struct i40e_adapter),
+static int eth_i40e_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), eth_i40e_dev_init);
+}
+
+static int eth_i40e_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_i40e_dev_uninit);
+}
+
+static struct rte_pci_driver rte_i40e_pmd = {
+ .id_table = pci_id_i40e_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_i40e_pci_probe,
+ .remove = eth_i40e_pci_remove,
};
static inline int
@@ -709,8 +677,9 @@ rte_i40e_dev_atomic_write_link_status(struct rte_eth_dev *dev,
return 0;
}
-RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_i40e, rte_i40e_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e, "* igb_uio | uio_pci_generic | vfio");
#ifndef I40E_GLQF_ORT
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4))
@@ -718,6 +687,9 @@ RTE_PMD_REGISTER_PCI_TABLE(net_i40e, pci_id_i40e_map);
#ifndef I40E_GLQF_PIT
#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4))
#endif
+#ifndef I40E_GLQF_L3_MAP
+#define I40E_GLQF_L3_MAP(_i) (0x0026C700 + ((_i) * 4))
+#endif
static inline void i40e_GLQF_reg_init(struct i40e_hw *hw)
{
@@ -763,8 +735,8 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf)
pf->main_vsi_seid, 0,
TRUE, NULL, NULL);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to add filter to drop flow control "
- " frames from VSIs.");
+ PMD_INIT_LOG(ERR,
+ "Failed to add filter to drop flow control frames from VSIs.");
}
static int
@@ -907,7 +879,7 @@ is_floating_veb_supported(struct rte_devargs *devargs)
static void
config_floating_veb(struct rte_eth_dev *dev)
{
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -928,9 +900,144 @@ config_floating_veb(struct rte_eth_dev *dev)
#define I40E_L2_TAGS_S_TAG_MASK I40E_MASK(0x1, I40E_L2_TAGS_S_TAG_SHIFT)
static int
+i40e_init_ethtype_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ char ethertype_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters ethertype_hash_params = {
+ .name = ethertype_hash_name,
+ .entries = I40E_MAX_ETHERTYPE_FILTER_NUM,
+ .key_len = sizeof(struct i40e_ethertype_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize ethertype filter rule list and hash */
+ TAILQ_INIT(&ethertype_rule->ethertype_list);
+ snprintf(ethertype_hash_name, RTE_HASH_NAMESIZE,
+ "ethertype_%s", dev->data->name);
+ ethertype_rule->hash_table = rte_hash_create(&ethertype_hash_params);
+ if (!ethertype_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create ethertype hash table!");
+ return -EINVAL;
+ }
+ ethertype_rule->hash_map = rte_zmalloc("i40e_ethertype_hash_map",
+ sizeof(struct i40e_ethertype_filter *) *
+ I40E_MAX_ETHERTYPE_FILTER_NUM,
+ 0);
+ if (!ethertype_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for ethertype hash map!");
+ ret = -ENOMEM;
+ goto err_ethertype_hash_map_alloc;
+ }
+
+ return 0;
+
+err_ethertype_hash_map_alloc:
+ rte_hash_free(ethertype_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_tunnel_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ char tunnel_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters tunnel_hash_params = {
+ .name = tunnel_hash_name,
+ .entries = I40E_MAX_TUNNEL_FILTER_NUM,
+ .key_len = sizeof(struct i40e_tunnel_filter_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize tunnel filter rule list and hash */
+ TAILQ_INIT(&tunnel_rule->tunnel_list);
+ snprintf(tunnel_hash_name, RTE_HASH_NAMESIZE,
+ "tunnel_%s", dev->data->name);
+ tunnel_rule->hash_table = rte_hash_create(&tunnel_hash_params);
+ if (!tunnel_rule->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create tunnel hash table!");
+ return -EINVAL;
+ }
+ tunnel_rule->hash_map = rte_zmalloc("i40e_tunnel_hash_map",
+ sizeof(struct i40e_tunnel_filter *) *
+ I40E_MAX_TUNNEL_FILTER_NUM,
+ 0);
+ if (!tunnel_rule->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for tunnel hash map!");
+ ret = -ENOMEM;
+ goto err_tunnel_hash_map_alloc;
+ }
+
+ return 0;
+
+err_tunnel_hash_map_alloc:
+ rte_hash_free(tunnel_rule->hash_table);
+
+ return ret;
+}
+
+static int
+i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ int ret;
+
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = I40E_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(struct rte_eth_fdir_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ /* Initialize flow director filter rule list and hash */
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", dev->data->name);
+ fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_table) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
+ sizeof(struct i40e_fdir_filter *) *
+ I40E_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ ret = -ENOMEM;
+ goto err_fdir_hash_map_alloc;
+ }
+ return 0;
+
+err_fdir_hash_map_alloc:
+ rte_hash_free(fdir_info->hash_table);
+
+ return ret;
+}
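
The three init helpers above follow the same rte_hash pattern: create a table keyed on the filter's input structure, keep a parallel hash_map array indexed by the slot number rte_hash returns, and chain the entries on a TAILQ. A minimal standalone sketch of that lifecycle follows; the table name, key type and entry count are illustrative only and are not taken from the driver.

	#include <rte_hash.h>
	#include <rte_hash_crc.h>
	#include <rte_lcore.h>

	struct flt_key { uint16_t ether_type; };        /* hypothetical key */

	static struct rte_hash *
	flt_table_create(void)
	{
		struct rte_hash_parameters params = {
			.name = "example_flt",          /* illustrative name */
			.entries = 128,
			.key_len = sizeof(struct flt_key),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = rte_socket_id(),
		};

		return rte_hash_create(&params);        /* NULL on failure */
	}

	static int
	flt_add(struct rte_hash *h, void **map, struct flt_key *key, void *entry)
	{
		int pos = rte_hash_add_key(h, key);     /* >= 0 is the slot index */

		if (pos < 0)
			return pos;
		map[pos] = entry;                       /* mirrors the pf hash_map arrays */
		return 0;
	}
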
+
+static int
eth_i40e_dev_init(struct rte_eth_dev *dev)
{
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi;
@@ -943,6 +1050,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_pkt_burst = i40e_recv_pkts;
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
@@ -952,9 +1060,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_set_tx_function(dev);
return 0;
}
- pci_dev = dev->pci_dev;
+ i40e_set_default_ptype_table(dev);
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
rte_eth_copy_pci_info(dev, pci_dev);
+ dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
pf->adapter->eth_dev = dev;
@@ -963,8 +1074,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->back = I40E_PF_TO_ADAPTER(pf);
hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
if (!hw->hw_addr) {
- PMD_INIT_LOG(ERR, "Hardware is not available, "
- "as address is NULL");
+ PMD_INIT_LOG(ERR,
+ "Hardware is not available, as address is NULL");
return -ENODEV;
}
@@ -1021,6 +1132,12 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
((hw->nvm.version >> 4) & 0xff),
(hw->nvm.version & 0xf), hw->nvm.eetrack);
+ /* initialise the L3_MAP register */
+ ret = i40e_aq_debug_write_register(hw, I40E_GLQF_L3_MAP(40),
+ 0x00000028, NULL);
+ if (ret)
+ PMD_INIT_LOG(ERR, "Failed to write L3 MAP register %d", ret);
+
/* Need the special FW version to support floating VEB */
config_floating_veb(dev);
/* Clear PXE mode */
@@ -1100,8 +1217,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Set the global registers with default ether type value */
ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(ERR, "Failed to set the default outer "
- "VLAN ether type");
+ PMD_INIT_LOG(ERR,
+ "Failed to set the default outer VLAN ether type");
goto err_setup_pf_switch;
}
@@ -1137,26 +1254,35 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* Should be after VSI initialized */
dev->data->mac_addrs = rte_zmalloc("i40e", len, 0);
if (!dev->data->mac_addrs) {
- PMD_INIT_LOG(ERR, "Failed to allocated memory "
- "for storing mac address");
+ PMD_INIT_LOG(ERR,
+ "Failed to allocated memory for storing mac address");
goto err_mac_alloc;
}
ether_addr_copy((struct ether_addr *)hw->mac.perm_addr,
&dev->data->mac_addrs[0]);
+ /* Init dcb to sw mode by default */
+ ret = i40e_dcb_init_configure(dev, TRUE);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(INFO, "Failed to init dcb.");
+ pf->flags &= ~I40E_FLAG_DCB;
+ }
+ /* Update HW struct after DCB configuration */
+ i40e_get_cap(hw);
+
/* initialize pf host driver to setup SRIOV resource if applicable */
i40e_pf_host_init(dev);
/* register callback func to eal lib */
- rte_intr_callback_register(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_register(intr_handle,
+ i40e_dev_interrupt_handler, dev);
/* configure and enable device interrupt */
i40e_pf_config_irq0(hw, TRUE);
i40e_pf_enable_irq0(hw);
/* enable uio intr after callback register */
- rte_intr_enable(&(pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
/*
* Add an ethertype filter to drop all flow control frames transmitted
* from VSIs. By doing so, we stop VF from sending out PAUSE or PFC
@@ -1172,15 +1298,26 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
- /* Init dcb to sw mode by default */
- ret = i40e_dcb_init_configure(dev, TRUE);
- if (ret != I40E_SUCCESS) {
- PMD_INIT_LOG(INFO, "Failed to init dcb.");
- pf->flags &= ~I40E_FLAG_DCB;
- }
+ ret = i40e_init_ethtype_filter_list(dev);
+ if (ret < 0)
+ goto err_init_ethtype_filter_list;
+ ret = i40e_init_tunnel_filter_list(dev);
+ if (ret < 0)
+ goto err_init_tunnel_filter_list;
+ ret = i40e_init_fdir_filter_list(dev);
+ if (ret < 0)
+ goto err_init_fdir_filter_list;
return 0;
+err_init_fdir_filter_list:
+ rte_free(pf->tunnel.hash_table);
+ rte_free(pf->tunnel.hash_map);
+err_init_tunnel_filter_list:
+ rte_free(pf->ethertype.hash_table);
+ rte_free(pf->ethertype.hash_map);
+err_init_ethtype_filter_list:
+ rte_free(dev->data->mac_addrs);
err_mac_alloc:
i40e_vsi_release(pf->main_vsi);
err_setup_pf_switch:
@@ -1200,12 +1337,73 @@ err_sync_phy_type:
return ret;
}
+static void
+i40e_rm_ethtype_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter *p_ethertype;
+ struct i40e_ethertype_rule *ethertype_rule;
+
+ ethertype_rule = &pf->ethertype;
+ /* Remove all ethertype filter rules and hash */
+ if (ethertype_rule->hash_map)
+ rte_free(ethertype_rule->hash_map);
+ if (ethertype_rule->hash_table)
+ rte_hash_free(ethertype_rule->hash_table);
+
+ while ((p_ethertype = TAILQ_FIRST(&ethertype_rule->ethertype_list))) {
+ TAILQ_REMOVE(&ethertype_rule->ethertype_list,
+ p_ethertype, rules);
+ rte_free(p_ethertype);
+ }
+}
+
+static void
+i40e_rm_tunnel_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter *p_tunnel;
+ struct i40e_tunnel_rule *tunnel_rule;
+
+ tunnel_rule = &pf->tunnel;
+ /* Remove all tunnel director rules and hash */
+ if (tunnel_rule->hash_map)
+ rte_free(tunnel_rule->hash_map);
+ if (tunnel_rule->hash_table)
+ rte_hash_free(tunnel_rule->hash_table);
+
+ while ((p_tunnel = TAILQ_FIRST(&tunnel_rule->tunnel_list))) {
+ TAILQ_REMOVE(&tunnel_rule->tunnel_list, p_tunnel, rules);
+ rte_free(p_tunnel);
+ }
+}
+
+static void
+i40e_rm_fdir_filter_list(struct i40e_pf *pf)
+{
+ struct i40e_fdir_filter *p_fdir;
+ struct i40e_fdir_info *fdir_info;
+
+ fdir_info = &pf->fdir;
+ /* Remove all flow director rules and hash */
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_table)
+ rte_hash_free(fdir_info->hash_table);
+
+ while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+ rte_free(p_fdir);
+ }
+}
+
static int
eth_i40e_dev_uninit(struct rte_eth_dev *dev)
{
+ struct i40e_pf *pf;
struct rte_pci_device *pci_dev;
+ struct rte_intr_handle *intr_handle;
struct i40e_hw *hw;
struct i40e_filter_control_settings settings;
+ struct rte_flow *p_flow;
int ret;
uint8_t aq_fail = 0;
@@ -1214,8 +1412,10 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- pci_dev = dev->pci_dev;
+ pci_dev = I40E_DEV_TO_PCI(dev);
+ intr_handle = &pci_dev->intr_handle;
if (hw->adapter_stopped == 0)
i40e_dev_close(dev);
@@ -1245,11 +1445,21 @@ eth_i40e_dev_uninit(struct rte_eth_dev *dev)
dev->data->mac_addrs = NULL;
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* register callback func to eal lib */
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- i40e_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40e_dev_interrupt_handler, dev);
+
+ i40e_rm_ethtype_filter_list(pf);
+ i40e_rm_tunnel_filter_list(pf);
+ i40e_rm_fdir_filter_list(pf);
+
+ /* Remove all flows */
+ while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
+ TAILQ_REMOVE(&pf->flow_list, p_flow, node);
+ rte_free(p_flow);
+ }
return 0;
}
@@ -1315,6 +1525,8 @@ i40e_dev_configure(struct rte_eth_dev *dev)
}
}
+ TAILQ_INIT(&pf->flow_list);
+
return 0;
err_dcb:
@@ -1335,7 +1547,8 @@ void
i40e_vsi_queues_unbind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t i;
@@ -1448,7 +1661,8 @@ void
i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_vect = vsi->msix_intr;
uint16_t nb_msix = RTE_MIN(vsi->nb_msix, intr_handle->nb_efd);
@@ -1519,7 +1733,8 @@ static void
i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t interval = i40e_calc_itr_interval(\
RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1550,7 +1765,8 @@ static void
i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
{
struct rte_eth_dev *dev = vsi->adapter->eth_dev;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
uint16_t msix_intr, i;
@@ -1629,7 +1845,7 @@ i40e_phy_conf_link(struct i40e_hw *hw,
/* use get_phy_abilities_resp value for the rest */
phy_conf.phy_type = phy_ab.phy_type;
phy_conf.phy_type_ext = phy_ab.phy_type_ext;
- phy_conf.fec_config = phy_ab.mod_type_ext;
+ phy_conf.fec_config = phy_ab.fec_cfg_curr_mod_ext_info;
phy_conf.eee_capability = phy_ab.eee_capability;
phy_conf.eeer = phy_ab.eeer_val;
phy_conf.low_power_ctrl = phy_ab.d3_lpan;
@@ -1676,8 +1892,10 @@ i40e_dev_start(struct rte_eth_dev *dev)
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
int ret, i;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
+ struct i40e_vsi *vsi;
hw->adapter_stopped = 0;
@@ -1693,8 +1911,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
!RTE_ETH_DEV_SRIOV(dev).active) &&
dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
- if (rte_intr_efd_enable(intr_handle, intr_vector))
- return -1;
+ ret = rte_intr_efd_enable(intr_handle, intr_vector);
+ if (ret)
+ return ret;
}
if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
@@ -1703,8 +1922,9 @@ i40e_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int),
0);
if (!intr_handle->intr_vec) {
- PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate %d rx_queues intr_vec",
+ dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -1754,6 +1974,15 @@ i40e_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(INFO, "fail to set vsi broadcast");
}
+ /* Enable the VLAN promiscuous mode. */
+ if (pf->vfs) {
+ for (i = 0; i < pf->vf_num; i++) {
+ vsi = pf->vfs[i].vsi;
+ i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid,
+ true, NULL);
+ }
+ }
+
/* Apply link configure */
if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
@@ -1777,8 +2006,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
i40e_pf_enable_irq0(hw);
if (dev->data->dev_conf.intr_conf.lsc != 0)
- PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ PMD_INIT_LOG(INFO,
+ "lsc won't enable because of no intr multiplex");
} else if (dev->data->dev_conf.intr_conf.lsc != 0) {
ret = i40e_aq_set_phy_int_mask(hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
@@ -1794,6 +2023,8 @@ i40e_dev_start(struct rte_eth_dev *dev)
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
+ i40e_filter_restore(pf);
+
return I40E_SUCCESS;
err_up:
@@ -1809,7 +2040,8 @@ i40e_dev_stop(struct rte_eth_dev *dev)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *main_vsi = pf->main_vsi;
struct i40e_mirror_rule *p_mirror;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int i;
/* Disable all queues */
@@ -1860,6 +2092,8 @@ i40e_dev_close(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t reg;
int i;
@@ -1871,7 +2105,7 @@ i40e_dev_close(struct rte_eth_dev *dev)
/* Disable interrupt */
i40e_pf_disable_irq0(hw);
- rte_intr_disable(&(dev->pci_dev->intr_handle));
+ rte_intr_disable(intr_handle);
/* shutdown and destroy the HMC */
i40e_shutdown_lan_hmc(hw);
@@ -2069,6 +2303,8 @@ out:
if (link.link_status == old.link_status)
return -1;
+ i40e_notify_all_vfs_link_status(dev);
+
return 0;
}
@@ -2579,19 +2815,49 @@ i40e_dev_queue_stats_mapping_set(__rte_unused struct rte_eth_dev *dev,
return -ENOSYS;
}
+static int
+i40e_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 full_ver;
+ u8 ver, patch;
+ u16 build;
+ int ret;
+
+ full_ver = hw->nvm.oem_ver;
+ ver = (u8)(full_ver >> 24);
+ build = (u16)((full_ver >> 8) & 0xffff);
+ patch = (u8)(full_ver & 0xff);
+
+ ret = snprintf(fw_version, fw_size,
+ "%d.%d%d 0x%08x %d.%d.%d",
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack,
+ ver, build, patch);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
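
The helper above follows the ethdev fw_version_get convention: it formats the NVM and OEM version into the caller's buffer and returns the number of bytes (including the terminating NUL) that would have been needed when the buffer is too small, or 0 on success. A hedged sketch of the application side follows; the port id type, buffer size and the exact rte_eth_dev_fw_version_get() prototype are assumptions based on that convention, not taken from this patch.

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* Print the firmware version of one port; a sketch assuming the
	 * generic rte_eth_dev_fw_version_get() API that dispatches to the
	 * fw_version_get dev op registered above. */
	static void
	print_fw_version(uint8_t port_id)
	{
		char fw[64];                            /* illustrative size */
		int ret = rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw));

		if (ret == 0)
			printf("port %u firmware: %s\n", port_id, fw);
		else if (ret > 0)
			printf("port %u: need a %d byte buffer\n", port_id, ret);
		else
			printf("port %u: fw version not available (%d)\n", port_id, ret);
	}
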
+
static void
i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = vsi->nb_qps;
dev_info->max_tx_queues = vsi->nb_qps;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
dev_info->max_mac_addrs = vsi->max_macaddrs;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2648,6 +2914,8 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
.nb_max = I40E_MAX_RING_DESC,
.nb_min = I40E_MIN_RING_DESC,
.nb_align = I40E_ALIGN_RING_DESC,
+ .nb_seg_max = I40E_TX_MAX_SEG,
+ .nb_mtu_seg_max = I40E_TX_MAX_MTU_SEG,
};
if (pf->flags & I40E_FLAG_VMDQ) {
@@ -2708,7 +2976,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
else {
ret = -EINVAL;
PMD_DRV_LOG(ERR,
- "Unsupported vlan type in single vlan.\n");
+ "Unsupported vlan type in single vlan.");
return ret;
}
break;
@@ -2720,13 +2988,15 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
&reg_r, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Fail to debug read from "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug read from I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
ret = -EIO;
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
- "0x%08"PRIx64"", reg_id, reg_r);
+ PMD_DRV_LOG(DEBUG,
+ "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: 0x%08"PRIx64,
+ reg_id, reg_r);
reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
@@ -2740,12 +3010,14 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
reg_w, NULL);
if (ret != I40E_SUCCESS) {
ret = -EIO;
- PMD_DRV_LOG(ERR, "Fail to debug write to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ PMD_DRV_LOG(ERR,
+ "Fail to debug write to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_id);
return ret;
}
- PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
- "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+ PMD_DRV_LOG(DEBUG,
+ "Debug write 0x%08"PRIx64" to I40E_GL_SWT_L2TAGCTRL[%d]",
+ reg_w, reg_id);
return ret;
}
@@ -2889,8 +3161,9 @@ i40e_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
max_high_water = I40E_RXPBSIZE >> I40E_KILOSHIFT;
if ((fc_conf->high_water > max_high_water) ||
(fc_conf->high_water < fc_conf->low_water)) {
- PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB, "
- "High_water must <= %d.", max_high_water);
+ PMD_INIT_LOG(ERR,
+ "Invalid high/low water setup value in KB, High_water must be <= %d.",
+ max_high_water);
return -EINVAL;
}
@@ -2994,7 +3267,7 @@ i40e_priority_flow_ctrl_set(__rte_unused struct rte_eth_dev *dev,
}
/* Add a MAC address, and update filters */
-static void
+static int
i40e_macaddr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
__rte_unused uint32_t index,
@@ -3011,13 +3284,13 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "VMDQ not %s, can't set mac to pool %u",
pf->flags & I40E_FLAG_VMDQ ? "configured" : "enabled",
pool);
- return;
+ return -ENOTSUP;
}
if (pool > pf->nb_cfg_vmdq_vsi) {
PMD_DRV_LOG(ERR, "Pool number %u invalid. Max pool is %u",
pool, pf->nb_cfg_vmdq_vsi);
- return;
+ return -EINVAL;
}
(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
@@ -3034,8 +3307,9 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
ret = i40e_vsi_add_mac(vsi, &mac_filter);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to add MACVLAN filter");
- return;
+ return -ENODEV;
}
+ return 0;
}
/* Remove a MAC address, and update filters */
@@ -3062,8 +3336,8 @@ i40e_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
/* No VMDQ pool enabled or configured */
if (!(pf->flags & I40E_FLAG_VMDQ) ||
(i > pf->nb_cfg_vmdq_vsi)) {
- PMD_DRV_LOG(ERR, "No VMDQ pool enabled"
- "/configured");
+ PMD_DRV_LOG(ERR,
+ "No VMDQ pool enabled/configured");
return;
}
vsi = pf->vmdq[i - 1].vsi;
@@ -3264,9 +3538,9 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3305,9 +3579,9 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != lut_size ||
reta_size > ETH_RSS_RETA_SIZE_512) {
- PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
- "(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, lut_size);
+ PMD_DRV_LOG(ERR,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)",
+ reta_size, lut_size);
return -EINVAL;
}
@@ -3362,8 +3636,9 @@ i40e_allocate_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
mem->va = mz->addr;
mem->pa = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
mem->zone = (const void *)mz;
- PMD_DRV_LOG(DEBUG, "memzone %s allocated with physical address: "
- "%"PRIu64, mz->name, mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
return I40E_SUCCESS;
}
@@ -3380,9 +3655,9 @@ i40e_free_dma_mem_d(__attribute__((unused)) struct i40e_hw *hw,
if (!mem)
return I40E_ERR_PARAM;
- PMD_DRV_LOG(DEBUG, "memzone %s to be freed with physical address: "
- "%"PRIu64, ((const struct rte_memzone *)mem->zone)->name,
- mem->pa);
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
rte_memzone_free((const struct rte_memzone *)mem->zone);
mem->zone = NULL;
mem->va = NULL;
@@ -3493,9 +3768,10 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint16_t qp_count = 0, vsi_count = 0;
- if (dev->pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
+ if (pci_dev->max_vfs && !hw->func_caps.sr_iov_1_1) {
PMD_INIT_LOG(ERR, "HW configuration doesn't support SRIOV");
return -EINVAL;
}
@@ -3536,13 +3812,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
/* VF queue/VSI allocation */
pf->vf_qp_offset = pf->lan_qp_offset + pf->lan_nb_qps;
- if (hw->func_caps.sr_iov_1_1 && dev->pci_dev->max_vfs) {
+ if (hw->func_caps.sr_iov_1_1 && pci_dev->max_vfs) {
pf->flags |= I40E_FLAG_SRIOV;
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
- pf->vf_num = dev->pci_dev->max_vfs;
- PMD_DRV_LOG(DEBUG, "%u VF VSIs, %u queues per VF VSI, "
- "in total %u queues", pf->vf_num, pf->vf_nb_qps,
- pf->vf_nb_qps * pf->vf_num);
+ pf->vf_num = pci_dev->max_vfs;
+ PMD_DRV_LOG(DEBUG,
+ "%u VF VSIs, %u queues per VF VSI, in total %u queues",
+ pf->vf_num, pf->vf_nb_qps, pf->vf_nb_qps * pf->vf_num);
} else {
pf->vf_nb_qps = 0;
pf->vf_num = 0;
@@ -3570,14 +3846,13 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
if (pf->max_nb_vmdq_vsi) {
pf->flags |= I40E_FLAG_VMDQ;
pf->vmdq_nb_qps = pf->vmdq_nb_qp_max;
- PMD_DRV_LOG(DEBUG, "%u VMDQ VSIs, %u queues "
- "per VMDQ VSI, in total %u queues",
- pf->max_nb_vmdq_vsi,
- pf->vmdq_nb_qps, pf->vmdq_nb_qps *
- pf->max_nb_vmdq_vsi);
+ PMD_DRV_LOG(DEBUG,
+ "%u VMDQ VSIs, %u queues per VMDQ VSI, in total %u queues",
+ pf->max_nb_vmdq_vsi, pf->vmdq_nb_qps,
+ pf->vmdq_nb_qps * pf->max_nb_vmdq_vsi);
} else {
- PMD_DRV_LOG(INFO, "No enough queues left for "
- "VMDq");
+ PMD_DRV_LOG(INFO,
+ "No enough queues left for VMDq");
}
} else {
PMD_DRV_LOG(INFO, "No queue or VSI left for VMDq");
@@ -3590,15 +3865,15 @@ i40e_pf_parameter_init(struct rte_eth_dev *dev)
pf->flags |= I40E_FLAG_DCB;
if (qp_count > hw->func_caps.num_tx_qp) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u queues, which exceeds "
- "the hardware maximum %u", qp_count,
- hw->func_caps.num_tx_qp);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u queues, which exceeds the hardware maximum %u",
+ qp_count, hw->func_caps.num_tx_qp);
return -EINVAL;
}
if (vsi_count > hw->func_caps.num_vsis) {
- PMD_DRV_LOG(ERR, "Failed to allocate %u VSIs, which exceeds "
- "the hardware maximum %u", vsi_count,
- hw->func_caps.num_vsis);
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate %u VSIs, which exceeds the hardware maximum %u",
+ vsi_count, hw->func_caps.num_vsis);
return -EINVAL;
}
@@ -3844,8 +4119,8 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
*/
entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
if (entry == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "resource pool");
+ PMD_DRV_LOG(ERR,
+ "Failed to allocate memory for resource pool");
return -ENOMEM;
}
entry->base = valid_entry->base;
@@ -3885,9 +4160,9 @@ validate_tcmap_parameter(struct i40e_vsi *vsi, uint8_t enabled_tcmap)
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
- PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
- "HW support 0x%x", hw->func_caps.enabled_tcmap,
- enabled_tcmap);
+ PMD_DRV_LOG(ERR,
+ "Enabled TC map 0x%x not applicable to HW support 0x%x",
+ hw->func_caps.enabled_tcmap, enabled_tcmap);
return I40E_NOT_SUPPORTED;
}
return I40E_SUCCESS;
@@ -4105,12 +4380,13 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
hw->aq.asq_last_status);
goto fail;
}
+ veb->enabled_tc = I40E_DEFAULT_TCMAP;
/* get statistics index */
ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL,
&veb->stats_idx, NULL, NULL, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d",
+ PMD_DRV_LOG(ERR, "Get veb statistics index failed, aq_err: %d",
hw->aq.asq_last_status);
goto fail;
}
@@ -4232,8 +4508,8 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
struct i40e_mac_filter *f;
struct ether_addr *mac;
- PMD_DRV_LOG(WARNING, "Cannot remove the default "
- "macvlan filter");
+ PMD_DRV_LOG(DEBUG,
+ "Cannot remove the default macvlan filter");
/* It needs to add the permanent mac into mac list */
f = rte_zmalloc("macv_filter", sizeof(*f), 0);
if (f == NULL) {
@@ -4283,8 +4559,9 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid,
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
- "configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "VSI failed to get TC bandwdith configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -4351,7 +4628,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
- PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d",
hw->aq.asq_last_status);
}
@@ -4372,14 +4649,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV &&
uplink_vsi == NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, "
- "VSI link shouldn't be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, VSI link shouldn't be NULL");
return NULL;
}
if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
- PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
- "uplink VSI should be NULL");
+ PMD_DRV_LOG(ERR,
+ "VSI setup failed, MAIN VSI uplink VSI should be NULL");
return NULL;
}
@@ -4423,6 +4700,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
vsi->max_macaddrs = I40E_NUM_MACADDR_MAX;
vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi;
vsi->user_param = user_param;
+ vsi->vlan_anti_spoof_on = 0;
+ vsi->vlan_filter_on = 0;
/* Allocate queues */
switch (vsi->type) {
case I40E_VSI_MAIN :
@@ -4530,8 +4809,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.seid = vsi->seid;
@@ -4599,13 +4878,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
- I40E_DEFAULT_TCMAP);
+ hw->func_caps.enabled_tcmap);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
- ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
+
+ ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap;
ctxt.info.valid_sections |=
rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID);
/**
@@ -4644,8 +4924,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4662,8 +4942,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info,
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping.");
+ PMD_DRV_LOG(ERR,
+ "Failed to configure TC queue mapping.");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
@@ -4926,8 +5206,9 @@ i40e_pf_setup(struct i40e_pf *pf)
/* make queue allocated first, let FDIR use queue pair 0*/
ret = i40e_res_pool_alloc(&pf->qp_pool, I40E_DEFAULT_QP_NUM_FDIR);
if (ret != I40E_FDIR_QUEUE_ID) {
- PMD_DRV_LOG(ERR, "queue allocation fails for FDIR :"
- " ret =%d", ret);
+ PMD_DRV_LOG(ERR,
+ "queue allocation fails for FDIR: ret =%d",
+ ret);
pf->flags &= ~I40E_FLAG_FDIR;
}
}
@@ -4946,12 +5227,12 @@ i40e_pf_setup(struct i40e_pf *pf)
else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512)
settings.hash_lut_size = I40E_HASH_LUT_SIZE_512;
else {
- PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n",
- hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported",
+ hw->func_caps.rss_table_size);
return I40E_ERR_PARAM;
}
- PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table "
- "size: %u\n", hw->func_caps.rss_table_size);
+ PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u",
+ hw->func_caps.rss_table_size);
pf->hash_lut_size = hw->func_caps.rss_table_size;
/* Enable ethtype and macvlan filters */
@@ -5201,8 +5482,8 @@ i40e_dev_rx_init(struct i40e_pf *pf)
ret = i40e_rx_queue_init(rxq);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to do RX queue "
- "initialization");
+ PMD_DRV_LOG(ERR,
+ "Failed to do RX queue initialization");
break;
}
}
@@ -5452,18 +5733,10 @@ static void
i40e_notify_all_vfs_link_status(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
- struct i40e_virtchnl_pf_event event;
int i;
- event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
- event.event_data.link_event.link_status =
- dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
-
for (i = 0; i < pf->vf_num; i++)
- i40e_pf_host_send_msg_to_vf(&pf->vfs[i], I40E_VIRTCHNL_OP_EVENT,
- I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
+ i40e_notify_vf_link_status(dev, &pf->vfs[i]);
}
static void
@@ -5486,8 +5759,9 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
ret = i40e_clean_arq_element(hw, &info, &pending);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
- "aq_err: %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(INFO,
+ "Failed to read msg from AdminQ, aq_err: %u",
+ hw->aq.asq_last_status);
break;
}
opcode = rte_le_to_cpu_16(info.desc.opcode);
@@ -5504,14 +5778,12 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
break;
case i40e_aqc_opc_get_link_status:
ret = i40e_dev_link_update(dev, 0);
- if (!ret) {
- i40e_notify_all_vfs_link_status(dev);
+ if (!ret)
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC, NULL);
- }
break;
default:
- PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ PMD_DRV_LOG(DEBUG, "Request %u is not supported yet",
opcode);
break;
}
@@ -5532,8 +5804,7 @@ i40e_dev_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+i40e_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5550,7 +5821,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(INFO, "No interrupt event");
goto done;
}
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
if (icr0 & I40E_PFINT_ICR0_ECC_ERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: unrecoverable ECC error");
if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK)
@@ -5565,7 +5835,6 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
PMD_DRV_LOG(ERR, "ICR0: HMC error");
if (icr0 & I40E_PFINT_ICR0_PE_CRITERR_MASK)
PMD_DRV_LOG(ERR, "ICR0: protocol engine critical error");
-#endif /* RTE_LIBRTE_I40E_DEBUG_DRIVER */
if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
PMD_DRV_LOG(INFO, "ICR0: VF reset detected");
@@ -5579,10 +5848,10 @@ i40e_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
done:
/* Enable interrupt */
i40e_pf_enable_irq0(hw);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(dev->intr_handle);
}
-static int
+int
i40e_add_macvlan_filters(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *filter,
int total)
@@ -5632,7 +5901,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC match type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC match type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5656,7 +5925,7 @@ DONE:
return ret;
}
-static int
+int
i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *filter,
int total)
@@ -5707,7 +5976,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid MAC filter type\n");
+ PMD_DRV_LOG(ERR, "Invalid MAC filter type");
ret = I40E_ERR_PARAM;
goto DONE;
}
@@ -5762,14 +6031,11 @@ i40e_find_vlan_filter(struct i40e_vsi *vsi,
}
static void
-i40e_set_vlan_filter(struct i40e_vsi *vsi,
- uint16_t vlan_id, bool on)
+i40e_store_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
{
uint32_t vid_idx, vid_bit;
- if (vlan_id > ETH_VLAN_ID_MAX)
- return;
-
vid_idx = I40E_VFTA_IDX(vlan_id);
vid_bit = I40E_VFTA_BIT(vlan_id);
@@ -5779,11 +6045,43 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi,
vsi->vfta[vid_idx] &= ~vid_bit;
}
+void
+i40e_set_vlan_filter(struct i40e_vsi *vsi,
+ uint16_t vlan_id, bool on)
+{
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ if (vlan_id > ETH_VLAN_ID_MAX)
+ return;
+
+ i40e_store_vlan_filter(vsi, vlan_id, on);
+
+ if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id)
+ return;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+
+ if (on) {
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR, "Failed to add vlan filter");
+ } else {
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS)
+ PMD_DRV_LOG(ERR,
+ "Failed to remove vlan filter");
+ }
+}
+
/**
* Find all vlan options for specific mac addr,
* return with actual vlan found.
*/
-static inline int
+int
i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
struct i40e_macvlan_filter *mv_f,
int num, struct ether_addr *addr)
@@ -5804,8 +6102,8 @@ i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
if (vsi->vfta[j] & (1 << k)) {
if (i > num - 1) {
- PMD_DRV_LOG(ERR, "vlan number "
- "not match");
+ PMD_DRV_LOG(ERR,
+ "vlan number doesn't match");
return I40E_ERR_PARAM;
}
(void)rte_memcpy(&mv_f[i].macaddr,
@@ -6098,7 +6396,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr)
if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
filter_type == RTE_MACVLAN_HASH_MATCH) {
if (vlan_num == 0) {
- PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n");
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
return I40E_ERR_PARAM;
}
} else if (filter_type == RTE_MAC_PERFECT_MATCH ||
@@ -6207,18 +6505,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV4;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK))
rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV4_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER))
@@ -6227,18 +6521,14 @@ i40e_parse_hena(uint64_t flags)
rss_hf |= ETH_RSS_FRAG_IPV6;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK))
rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-#ifdef X722_SUPPORT
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
-#endif
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP))
rss_hf |= ETH_RSS_NONFRAG_IPV6_SCTP;
if (flags & (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER))
@@ -6289,8 +6579,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
if (ret)
- PMD_INIT_LOG(ERR, "Failed to configure RSS key "
- "via AQ");
+ PMD_INIT_LOG(ERR, "Failed to configure RSS key via AQ");
} else {
uint32_t *hash_key = (uint32_t *)key;
uint16_t i;
@@ -6436,7 +6725,95 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
return 0;
}
+/* Convert tunnel filter structure */
+static int
+i40e_tunnel_filter_convert(
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.outer_mac,
+ (struct ether_addr *)&tunnel_filter->input.outer_mac);
+ ether_addr_copy((struct ether_addr *)&cld_filter->element.inner_mac,
+ (struct ether_addr *)&tunnel_filter->input.inner_mac);
+ tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
+ if ((rte_le_to_cpu_16(cld_filter->element.flags) &
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
+ I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ else
+ tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ tunnel_filter->input.flags = cld_filter->element.flags;
+ tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
+ tunnel_filter->queue = cld_filter->element.queue_number;
+ rte_memcpy(tunnel_filter->input.general_fields,
+ cld_filter->general_fields,
+ sizeof(cld_filter->general_fields));
+
+ return 0;
+}
+
+/* Check if the tunnel filter exists */
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(tunnel_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return tunnel_rule->hash_map[ret];
+}
+
+/* Add a tunnel filter into the SW list */
static int
+i40e_sw_tunnel_filter_insert(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *tunnel_filter)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ int ret;
+
+ ret = rte_hash_add_key(rule->hash_table, &tunnel_filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = tunnel_filter;
+
+ TAILQ_INSERT_TAIL(&rule->tunnel_list, tunnel_filter, rules);
+
+ return 0;
+}
+
+/* Delete a tunnel filter from the SW list */
+int
+i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input)
+{
+ struct i40e_tunnel_rule *rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel_filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete tunnel filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ tunnel_filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->tunnel_list, tunnel_filter, rules);
+ rte_free(tunnel_filter);
+
+ return 0;
+}
+
+int
i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
struct rte_eth_tunnel_filter_conf *tunnel_filter,
uint8_t add)
@@ -6449,37 +6826,44 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
int val, ret = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi = pf->main_vsi;
- struct i40e_aqc_add_remove_cloud_filters_element_data *cld_filter;
- struct i40e_aqc_add_remove_cloud_filters_element_data *pfilter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
cld_filter = rte_zmalloc("tunnel_filter",
- sizeof(struct i40e_aqc_add_remove_cloud_filters_element_data),
- 0);
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
if (NULL == cld_filter) {
PMD_DRV_LOG(ERR, "Failed to alloc memory.");
- return -EINVAL;
+ return -ENOMEM;
}
pfilter = cld_filter;
- ether_addr_copy(&tunnel_filter->outer_mac, (struct ether_addr*)&pfilter->outer_mac);
- ether_addr_copy(&tunnel_filter->inner_mac, (struct ether_addr*)&pfilter->inner_mac);
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
- pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
- rte_memcpy(&pfilter->ipaddr.v4.data,
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
&rte_cpu_to_le_32(ipv4_addr),
- sizeof(pfilter->ipaddr.v4.data));
+ sizeof(pfilter->element.ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
for (i = 0; i < 4; i++) {
convert_ipv6[i] =
rte_cpu_to_le_32(rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv6_addr[i]));
}
- rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
- sizeof(pfilter->ipaddr.v6.data));
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
}
/* check tunneled type */
@@ -6501,23 +6885,369 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
}
val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
- &pfilter->flags);
+ &pfilter->element.flags);
if (val < 0) {
rte_free(cld_filter);
return -EINVAL;
}
- pfilter->flags |= rte_cpu_to_le_16(
+ pfilter->element.flags |= rte_cpu_to_le_16(
I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
- pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
- pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id);
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ /* Check if the filter already exists in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
- if (add)
- ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1);
- else
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
- cld_filter, 1);
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
+
+ rte_free(cld_filter);
+ return ret;
+}
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0 0x48
+#define I40E_TR_VXLAN_GRE_KEY_MASK 0x4
+#define I40E_TR_GENEVE_KEY_MASK 0x8
+#define I40E_TR_GENERIC_UDP_TUNNEL_MASK 0x40
+#define I40E_TR_GRE_KEY_MASK 0x400
+#define I40E_TR_GRE_KEY_WITH_XSUM_MASK 0x800
+#define I40E_TR_GRE_NO_KEY_MASK 0x8000
+
+static enum
+i40e_status_code i40e_replace_mpls_l1_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC;
+ filter_replace.new_filter_type = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 3 entries */
+ filter_replace_buf.data[0] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[2] = 0xFF;
+ filter_replace_buf.data[3] = 0xFF;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[7] = 0xF0;
+ filter_replace_buf.data[8]
+ = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_TR_WORD0;
+ filter_replace_buf.data[8] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[10] = I40E_TR_VXLAN_GRE_KEY_MASK |
+ I40E_TR_GENEVE_KEY_MASK |
+ I40E_TR_GENERIC_UDP_TUNNEL_MASK;
+ filter_replace_buf.data[11] = (I40E_TR_GRE_KEY_MASK |
+ I40E_TR_GRE_KEY_WITH_XSUM_MASK |
+ I40E_TR_GRE_NO_KEY_MASK) >> 8;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+static enum
+i40e_status_code i40e_replace_mpls_cloud_filter(struct i40e_pf *pf)
+{
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ /* For MPLSoUDP */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (status < 0)
+ return status;
+
+ /* For MPLSoGRE */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER |
+ I40E_AQC_MIRROR_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
+ filter_replace.new_filter_type =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_L1_FILTER_TEID_MPLS;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+
+ status = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return status;
+}
+
+int
+i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add)
+{
+ uint16_t ip_type;
+ uint32_t ipv4_addr;
+ uint8_t i, tun_type = 0;
+ /* internal variable to convert ipv6 byte order */
+ uint32_t convert_ipv6[4];
+ int val, ret = 0;
+ struct i40e_pf_vf *vf = NULL;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *cld_filter;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext *pfilter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *tunnel, *node;
+ struct i40e_tunnel_filter check_filter; /* Check if filter exists */
+ uint32_t teid_le;
+ bool big_buffer = 0;
+
+ cld_filter = rte_zmalloc("tunnel_filter",
+ sizeof(struct i40e_aqc_add_rm_cloud_filt_elem_ext),
+ 0);
+
+ if (cld_filter == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+ return -ENOMEM;
+ }
+ pfilter = cld_filter;
+
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->element.outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->element.inner_mac);
+
+ pfilter->element.inner_vlan =
+ rte_cpu_to_le_16(tunnel_filter->inner_vlan);
+ if (tunnel_filter->ip_type == I40E_TUNNEL_IPTYPE_IPV4) {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
+ ipv4_addr = rte_be_to_cpu_32(tunnel_filter->ip_addr.ipv4_addr);
+ rte_memcpy(&pfilter->element.ipaddr.v4.data,
+ &rte_cpu_to_le_32(ipv4_addr),
+ sizeof(pfilter->element.ipaddr.v4.data));
+ } else {
+ ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(rte_be_to_cpu_32(
+ tunnel_filter->ip_addr.ipv6_addr[i]));
+ }
+ rte_memcpy(&pfilter->element.ipaddr.v6.data,
+ &convert_ipv6,
+ sizeof(pfilter->element.ipaddr.v6.data));
+ }
+
+ /* check tunneled type */
+ switch (tunnel_filter->tunnel_type) {
+ case I40E_TUNNEL_TYPE_VXLAN:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
+ break;
+ case I40E_TUNNEL_TYPE_NVGRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
+ break;
+ case I40E_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoUDP:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x40;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP;
+ break;
+ case I40E_TUNNEL_TYPE_MPLSoGRE:
+ if (!pf->mpls_replace_flag) {
+ i40e_replace_mpls_l1_filter(pf);
+ i40e_replace_mpls_cloud_filter(pf);
+ pf->mpls_replace_flag = 1;
+ }
+ teid_le = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0] =
+ teid_le >> 4;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1] =
+ (teid_le & 0xF) << 12;
+ pfilter->general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2] =
+ 0x0;
+ big_buffer = 1;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE;
+ break;
+ case I40E_TUNNEL_TYPE_QINQ:
+ if (!pf->qinq_replace_flag) {
+ ret = i40e_cloud_filter_qinq_create(pf);
+ if (ret < 0)
+ PMD_DRV_LOG(DEBUG,
+ "QinQ tunnel filter already created.");
+ pf->qinq_replace_flag = 1;
+ }
+ /* Put the values of the outer and inner VLAN into the
+ * general fields. Big Buffer must be used, see the changes
+ * in i40e_aq_add_cloud_filters.
+ */
+ pfilter->general_fields[0] = tunnel_filter->inner_vlan;
+ pfilter->general_fields[1] = tunnel_filter->outer_vlan;
+ big_buffer = 1;
+ break;
+ default:
+ /* Other tunnel types are not supported. */
+ PMD_DRV_LOG(ERR, "tunnel type is not supported.");
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+
+ if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoUDP)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_MPLSoGRE)
+ pfilter->element.flags =
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE;
+ else if (tunnel_filter->tunnel_type == I40E_TUNNEL_TYPE_QINQ)
+ pfilter->element.flags |=
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ else {
+ val = i40e_dev_get_filter_type(tunnel_filter->filter_type,
+ &pfilter->element.flags);
+ if (val < 0) {
+ rte_free(cld_filter);
+ return -EINVAL;
+ }
+ }
+
+ pfilter->element.flags |= rte_cpu_to_le_16(
+ I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE |
+ ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT));
+ pfilter->element.tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id);
+ pfilter->element.queue_number =
+ rte_cpu_to_le_16(tunnel_filter->queue_id);
+
+ if (!tunnel_filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ if (tunnel_filter->vf_id >= pf->vf_num) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+ vf = &pf->vfs[tunnel_filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ /* Check if the filter exists in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_tunnel_filter_convert(cld_filter, &check_filter);
+ check_filter.is_to_vf = tunnel_filter->is_to_vf;
+ check_filter.vf_id = tunnel_filter->vf_id;
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing tunnel rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding tunnel filter!");
+ return -EINVAL;
+ }
+
+ if (add) {
+ if (big_buffer)
+ ret = i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_add_cloud_filters(hw,
+ vsi->seid, &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to add a tunnel filter.");
+ return -ENOTSUP;
+ }
+ tunnel = rte_zmalloc("tunnel_filter", sizeof(*tunnel), 0);
+ rte_memcpy(tunnel, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_tunnel_filter_insert(pf, tunnel);
+ } else {
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(
+ hw, vsi->seid, cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter->element, 1);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "Failed to delete a tunnel filter.");
+ return -ENOTSUP;
+ }
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+ }
rte_free(cld_filter);
return ret;
@@ -6554,8 +7284,9 @@ i40e_add_vxlan_port(struct i40e_pf *pf, uint16_t port)
/* Now check if there is space to add the new port */
idx = i40e_get_vxlan_port_idx(pf, 0);
if (idx < 0) {
- PMD_DRV_LOG(ERR, "Maximum number of UDP ports reached,"
- "not adding port %d", port);
+ PMD_DRV_LOG(ERR,
+ "Maximum number of UDP ports reached, not adding port %d",
+ port);
return -ENOSPC;
}
@@ -6794,7 +7525,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
int ret = -EINVAL;
val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2));
- PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val);
+ PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val);
if (len == 3) {
reg = val | I40E_GL_PRS_FVBM_MSK_ENA;
@@ -6813,7 +7544,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len)
} else {
ret = 0;
}
- PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n",
+ PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x",
I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)));
return ret;
@@ -6926,15 +7657,15 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable)
if (enable > 0) {
if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been enabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been enabled");
return;
}
reg |= I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
} else {
if (!(reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK)) {
- PMD_DRV_LOG(INFO, "Symmetric hash has already "
- "been disabled");
+ PMD_DRV_LOG(INFO,
+ "Symmetric hash has already been disabled");
return;
}
reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK;
@@ -7051,23 +7782,60 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw,
pctype = i40e_flowtype_to_pctype(i);
reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ?
I40E_GLQF_HSYM_SYMH_ENA_MASK : 0;
- i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ if (hw->mac.type == I40E_MAC_X722) {
+ if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV4_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_UDP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP),
+ reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP),
+ reg);
+ } else if (pctype == I40E_FILTER_PCTYPE_NONF_IPV6_TCP) {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP), reg);
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK),
+ reg);
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype),
+ reg);
+ }
+ } else {
+ i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg);
+ }
}
reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) {
/* Toeplitz */
if (reg & I40E_GLQF_CTL_HTOEP_MASK) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Toeplitz");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Toeplitz");
goto out;
}
reg |= I40E_GLQF_CTL_HTOEP_MASK;
} else if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR) {
/* Simple XOR */
if (!(reg & I40E_GLQF_CTL_HTOEP_MASK)) {
- PMD_DRV_LOG(DEBUG, "Hash function already set to "
- "Simple XOR");
+ PMD_DRV_LOG(DEBUG,
+ "Hash function already set to Simple XOR");
goto out;
}
reg &= ~I40E_GLQF_CTL_HTOEP_MASK;
@@ -7110,7 +7878,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7129,7 +7896,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7139,7 +7905,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7149,7 +7914,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
I40E_INSET_TCP_FLAGS | I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7183,7 +7947,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_HOP_LIMIT | I40E_INSET_IPV6_SRC |
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7202,7 +7965,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7212,7 +7974,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7222,7 +7983,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_DST | I40E_INSET_SRC_PORT |
I40E_INSET_DST_PORT | I40E_INSET_TCP_FLAGS |
I40E_INSET_FLEX_PAYLOAD,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_DMAC | I40E_INSET_SMAC |
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
@@ -7262,7 +8022,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7273,19 +8032,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_IPV4_TOS | I40E_INSET_IPV4_TTL |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
@@ -7307,7 +8063,6 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7318,19 +8073,16 @@ i40e_get_valid_input_set(enum i40e_filter_pctype pctype,
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_IPV6_TC | I40E_INSET_IPV6_HOP_LIMIT |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_VLAN_OUTER | I40E_INSET_VLAN_INNER |
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
@@ -7374,7 +8126,7 @@ i40e_validate_input_set(enum i40e_filter_pctype pctype,
}
/* default input set fields combination per pctype */
-static uint64_t
+uint64_t
i40e_get_default_input_set(uint16_t pctype)
{
static const uint64_t default_inset_table[] = {
@@ -7383,22 +8135,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
I40E_INSET_IPV4_SRC | I40E_INSET_IPV4_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7410,22 +8158,18 @@ i40e_get_default_input_set(uint16_t pctype)
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
I40E_INSET_IPV6_SRC | I40E_INSET_IPV6_DST |
I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT |
@@ -7681,10 +8425,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val)
{
uint32_t reg = i40e_read_rx_ctl(hw, addr);
- PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg);
+ PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg);
if (reg != val)
i40e_write_rx_ctl(hw, addr, val);
- PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr,
+ PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr,
(uint32_t)i40e_read_rx_ctl(hw, addr));
}
@@ -7995,16 +8739,95 @@ i40e_hash_filter_ctrl(struct rte_eth_dev *dev,
return ret;
}
+/* Convert ethertype filter structure */
+static int
+i40e_ethertype_filter_convert(const struct rte_eth_ethertype_filter *input,
+ struct i40e_ethertype_filter *filter)
+{
+ rte_memcpy(&filter->input.mac_addr, &input->mac_addr, ETHER_ADDR_LEN);
+ filter->input.ether_type = input->ether_type;
+ filter->flags = input->flags;
+ filter->queue = input->queue;
+
+ return 0;
+}
+
+/* Check if the ethertype filter exists */
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(ethertype_rule->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return ethertype_rule->hash_map[ret];
+}
+
+/* Add ethertype filter in SW list */
+static int
+i40e_sw_ethertype_filter_insert(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ int ret;
+
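+ /* On success rte_hash_add_key() returns the position of the key
+ * in the table; that position is reused below as the index into
+ * the hash_map array.
+ */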
+ ret = rte_hash_add_key(rule->hash_table, &filter->input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ rule->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&rule->ethertype_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete ethertype filter in SW list */
+int
+i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input)
+{
+ struct i40e_ethertype_rule *rule = &pf->ethertype;
+ struct i40e_ethertype_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(rule->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete ethertype filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = rule->hash_map[ret];
+ rule->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&rule->ethertype_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
 * Configure ethertype filter, which can direct packets by filtering
 * with mac address and ether_type, or with ether_type only
*/
-static int
+int
i40e_ethertype_filter_set(struct i40e_pf *pf,
struct rte_eth_ethertype_filter *filter,
bool add)
{
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *ethertype_filter, *node;
+ struct i40e_ethertype_filter check_filter;
struct i40e_control_filter_stats stats;
uint16_t flags = 0;
int ret;
@@ -8015,13 +8838,29 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
}
if (filter->ether_type == ETHER_TYPE_IPv4 ||
filter->ether_type == ETHER_TYPE_IPv6) {
- PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
- " control packet filter.", filter->ether_type);
+ PMD_DRV_LOG(ERR,
+ "unsupported ether_type(0x%04x) in control packet filter.",
+ filter->ether_type);
return -EINVAL;
}
if (filter->ether_type == ETHER_TYPE_VLAN)
- PMD_DRV_LOG(WARNING, "filter vlan ether_type in first tag is"
- " not supported.");
+ PMD_DRV_LOG(WARNING,
+ "filter vlan ether_type in first tag is not supported.");
+
+ /* Check if the filter exists in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_ethertype_filter_convert(filter, &check_filter);
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule,
+ &check_filter.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR, "Conflict with existing ethertype rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR, "There's no corresponding ethertype filter!");
+ return -EINVAL;
+ }
if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
@@ -8036,14 +8875,25 @@ i40e_ethertype_filter_set(struct i40e_pf *pf,
pf->main_vsi->seid,
filter->queue, add, &stats, NULL);
- PMD_DRV_LOG(INFO, "add/rem control packet filter, return %d,"
- " mac_etype_used = %u, etype_used = %u,"
- " mac_etype_free = %u, etype_free = %u\n",
- ret, stats.mac_etype_used, stats.etype_used,
- stats.mac_etype_free, stats.etype_free);
+ PMD_DRV_LOG(INFO,
+ "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u",
+ ret, stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
if (ret < 0)
return -ENOSYS;
- return 0;
+
+ /* Add or delete a filter in SW list */
+ if (add) {
+ ethertype_filter = rte_zmalloc("ethertype_filter",
+ sizeof(*ethertype_filter), 0);
+ rte_memcpy(ethertype_filter, &check_filter,
+ sizeof(check_filter));
+ ret = i40e_sw_ethertype_filter_insert(pf, ethertype_filter);
+ } else {
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+ }
+
+ return ret;
}
/*
@@ -8078,7 +8928,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev,
FALSE);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -ENOSYS;
break;
}
@@ -8116,6 +8966,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_FDIR:
ret = i40e_fdir_ctrl_func(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &i40e_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
@@ -8133,10 +8988,11 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev,
static void
i40e_enable_extended_tag(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
uint32_t buf = 0;
int ret;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CAP_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8149,7 +9005,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
}
buf = 0;
- ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_read_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
@@ -8161,7 +9017,7 @@ i40e_enable_extended_tag(struct rte_eth_dev *dev)
return;
}
buf |= PCI_DEV_CTRL_EXT_TAG_MASK;
- ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf),
+ ret = rte_pci_write_config(pci_dev, &buf, sizeof(buf),
PCI_DEV_CTRL_REG);
if (ret < 0) {
PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x",
@@ -8224,18 +9080,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV4] = RTE_ETH_FLOW_FRAG_IPV4,
[I40E_FILTER_PCTYPE_NONF_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK] =
RTE_ETH_FLOW_NONFRAG_IPV4_TCP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV4_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV4_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV4_OTHER] =
@@ -8243,18 +9095,14 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype)
[I40E_FILTER_PCTYPE_FRAG_IPV6] = RTE_ETH_FLOW_FRAG_IPV6,
[I40E_FILTER_PCTYPE_NONF_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
[I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP] =
RTE_ETH_FLOW_NONFRAG_IPV6_UDP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
-#ifdef X722_SUPPORT
[I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK] =
RTE_ETH_FLOW_NONFRAG_IPV6_TCP,
-#endif
[I40E_FILTER_PCTYPE_NONF_IPV6_SCTP] =
RTE_ETH_FLOW_NONFRAG_IPV6_SCTP,
[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] =
@@ -8369,9 +9217,9 @@ i40e_configure_registers(struct i40e_hw *hw)
ret = i40e_aq_debug_write_register(hw, reg_table[i].addr,
reg_table[i].val, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to write 0x%"PRIx64" to the "
- "address of 0x%"PRIx32, reg_table[i].val,
- reg_table[i].addr);
+ PMD_DRV_LOG(ERR,
+ "Failed to write 0x%"PRIx64" to the address of 0x%"PRIx32,
+ reg_table[i].val, reg_table[i].addr);
break;
}
PMD_DRV_LOG(DEBUG, "Write 0x%"PRIx64" to the address of "
@@ -8416,8 +9264,9 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi)
I40E_VSI_L2TAGSTXVALID(
vsi->vsi_id), reg, NULL);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "Failed to update "
- "VSI_L2TAGSTXVALID[%d]", vsi->vsi_id);
+ PMD_DRV_LOG(ERR,
+ "Failed to update VSI_L2TAGSTXVALID[%d]",
+ vsi->vsi_id);
return I40E_ERR_CONFIG;
}
}
@@ -8468,11 +9317,10 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw,
rte_memcpy(&desc.params.raw, &cmd, sizeof(cmd));
status = i40e_asq_send_command(hw, &desc, entries, buff_len, NULL);
- PMD_DRV_LOG(INFO, "i40e_aq_add_mirror_rule, aq_status %d,"
- "rule_id = %u"
- " mirror_rules_used = %u, mirror_rules_free = %u,",
- hw->aq.asq_last_status, resp->rule_id,
- resp->mirror_rules_used, resp->mirror_rules_free);
+ PMD_DRV_LOG(INFO,
+ "i40e_aq_add_mirror_rule, aq_status %d, rule_id = %u mirror_rules_used = %u, mirror_rules_free = %u,",
+ hw->aq.asq_last_status, resp->rule_id,
+ resp->mirror_rules_used, resp->mirror_rules_free);
*rule_id = rte_le_to_cpu_16(resp->rule_id);
return status;
@@ -8550,8 +9398,8 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
PMD_DRV_LOG(DEBUG, "i40e_mirror_rule_set: sw_id = %d.", sw_id);
if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
- PMD_DRV_LOG(ERR, "mirror rule can not be configured"
- " without veb or vfs.");
+ PMD_DRV_LOG(ERR,
+ "mirror rule can not be configured without veb or vfs.");
return -ENOSYS;
}
if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
@@ -8583,9 +9431,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -8674,9 +9522,9 @@ i40e_mirror_rule_set(struct rte_eth_dev *dev,
mirr_rule->rule_type, mirr_rule->entries,
j, &rule_id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to add mirror rule:"
- " ret = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to add mirror rule: ret = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
rte_free(mirr_rule);
return -ENOSYS;
}
@@ -8728,9 +9576,9 @@ i40e_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t sw_id)
mirr_rule->entries,
mirr_rule->num_entries, mirr_rule->id);
if (ret < 0) {
- PMD_DRV_LOG(ERR, "failed to remove mirror rule:"
- " status = %d, aq_err = %d.",
- ret, hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "failed to remove mirror rule: status = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
TAILQ_REMOVE(&pf->mirror_list, mirr_rule, rules);
@@ -9162,9 +10010,9 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid,
&veb_bw, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation"
- " per TC failed = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "AQ command Config switch_comp BW allocation per TC failed = %d",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9172,16 +10020,18 @@ i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map)
ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
&ets_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp ETS configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
memset(&bw_query, 0, sizeof(bw_query));
ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
&bw_query, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth"
- " configuration %u", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR,
+ "Failed to get switch_comp bandwidth configuration %u",
+ hw->aq.asq_last_status);
return ret;
}
@@ -9246,8 +10096,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
}
ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &bw_data, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "AQ command Config VSI BW allocation"
- " per TC failed = %d",
+ PMD_INIT_LOG(ERR,
+ "AQ command Config VSI BW allocation per TC failed = %d",
hw->aq.asq_last_status);
goto out;
}
@@ -9268,9 +10118,8 @@ i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map)
/* Update the VSI after updating the VSI queue-mapping information */
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- PMD_INIT_LOG(ERR, "Failed to configure "
- "TC queue mapping = %d",
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR, "Failed to configure TC queue mapping = %d",
+ hw->aq.asq_last_status);
goto out;
}
/* update the local VSI info with updated queue map */
@@ -9322,8 +10171,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
/* Use the FW API if FW > v4.4*/
if (!(((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver >= 4)) ||
(hw->aq.fw_maj_ver >= 5))) {
- PMD_INIT_LOG(ERR, "FW < v4.4, can not use FW LLDP API"
- " to configure DCB");
+ PMD_INIT_LOG(ERR,
+ "FW < v4.4, can not use FW LLDP API to configure DCB");
return I40E_ERR_FIRMWARE_API_VERSION;
}
@@ -9338,8 +10187,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
old_cfg->etsrec = old_cfg->etscfg;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR,
- "Set DCB Config failed, err %s aq_err %s\n",
+ PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s",
i40e_stat_str(hw, ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
@@ -9371,7 +10219,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VEB seid=%d\n",
+ "Failed configuring TC for VEB seid=%d",
main_vsi->veb->seid);
}
/* Update each VSI */
@@ -9389,8 +10237,8 @@ i40e_dcb_hw_configure(struct i40e_pf *pf,
I40E_DEFAULT_TCMAP);
if (ret)
PMD_INIT_LOG(WARNING,
- "Failed configuring TC for VSI seid=%d\n",
- vsi_list->vsi->seid);
+ "Failed configuring TC for VSI seid=%d",
+ vsi_list->vsi->seid);
/* continue */
}
}
@@ -9409,7 +10257,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret = 0;
+ int i, ret = 0;
if ((pf->flags & I40E_FLAG_DCB) == 0) {
PMD_INIT_LOG(ERR, "HW doesn't support DCB");
@@ -9436,6 +10284,9 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
hw->local_dcbx_config.etscfg.tcbwtable[0] = 100;
hw->local_dcbx_config.etscfg.tsatable[0] =
I40E_IEEE_TSA_ETS;
+ /* all UPs mapping to TC0 */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ hw->local_dcbx_config.etscfg.prioritytable[i] = 0;
hw->local_dcbx_config.etsrec =
hw->local_dcbx_config.etscfg;
hw->local_dcbx_config.pfc.willing = 0;
@@ -9450,15 +10301,15 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
I40E_APP_PROTOID_FCOE;
ret = i40e_set_dcb_config(hw);
if (ret) {
- PMD_INIT_LOG(ERR, "default dcb config fails."
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "default dcb config fails. err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOSYS;
}
} else {
- PMD_INIT_LOG(ERR, "DCB initialization in FW fails,"
- " err = %d, aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCB initialization in FW fails, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
} else {
@@ -9469,14 +10320,14 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb)
ret = i40e_init_dcb(hw);
if (!ret) {
if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
- PMD_INIT_LOG(ERR, "HW doesn't support"
- " DCBX offload.");
+ PMD_INIT_LOG(ERR,
+ "HW doesn't support DCBX offload.");
return -ENOTSUP;
}
} else {
- PMD_INIT_LOG(ERR, "DCBX configuration failed, err = %d,"
- " aq_err = %d.", ret,
- hw->aq.asq_last_status);
+ PMD_INIT_LOG(ERR,
+ "DCBX configuration failed, err = %d, aq_err = %d.",
+ ret, hw->aq.asq_last_status);
return -ENOTSUP;
}
}
@@ -9585,7 +10436,8 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev,
static int
i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -9610,7 +10462,7 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT));
I40E_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -9618,7 +10470,8 @@ i40e_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40e_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -9740,8 +10593,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_eth_dev_data *dev_data = pf->dev_data;
- uint32_t frame_size = mtu + ETHER_HDR_LEN
- + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
int ret = 0;
/* check if mtu is within the allowed range */
@@ -9750,8 +10602,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* mtu setting is forbidden if port is started */
if (dev_data->dev_started) {
- PMD_DRV_LOG(ERR,
- "port %d must be stopped before configuration\n",
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
dev_data->port_id);
return -EBUSY;
}
@@ -9765,3 +10616,230 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return ret;
}
+
+/* Restore ethertype filter */
+static void
+i40e_ethertype_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *f;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags;
+
+ TAILQ_FOREACH(f, ethertype_list, rules) {
+ flags = 0;
+ if (!(f->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (f->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ i40e_aq_add_rem_control_packet_filter(hw,
+ f->input.mac_addr.addr_bytes,
+ f->input.ether_type,
+ flags, pf->main_vsi->seid,
+ f->queue, 1, &stats, NULL);
+ }
+ PMD_DRV_LOG(INFO, "Ethertype filter:"
+ " mac_etype_used = %u, etype_used = %u,"
+ " mac_etype_free = %u, etype_free = %u",
+ stats.mac_etype_used, stats.etype_used,
+ stats.mac_etype_free, stats.etype_free);
+}
+
+/* Restore tunnel filter */
+static void
+i40e_tunnel_filter_restore(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *f;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ bool big_buffer = 0;
+
+ TAILQ_FOREACH(f, tunnel_list, rules) {
+ if (!f->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[f->vf_id];
+ vsi = vf->vsi;
+ }
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&f->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&f->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = f->input.inner_vlan;
+ cld_filter.element.flags = f->input.flags;
+ cld_filter.element.tenant_id = f->input.tenant_id;
+ cld_filter.element.queue_number = f->queue;
+ rte_memcpy(cld_filter.general_fields,
+ f->input.general_fields,
+ sizeof(f->input.general_fields));
+
+ if (((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ ((f->input.flags &
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ big_buffer = 1;
+
+ if (big_buffer)
+ i40e_aq_add_cloud_filters_big_buffer(hw,
+ vsi->seid, &cld_filter, 1);
+ else
+ i40e_aq_add_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ }
+}
+
+static void
+i40e_filter_restore(struct i40e_pf *pf)
+{
+ i40e_ethertype_filter_restore(pf);
+ i40e_tunnel_filter_restore(pf);
+ i40e_fdir_filter_restore(pf);
+}
+
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
+{
+ if (strcmp(dev->data->drv_name,
+ drv->driver.name))
+ return false;
+
+ return true;
+}
+
+bool
+is_i40e_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_i40e_pmd);
+}
+
+/* Create a QinQ cloud filter
+ *
+ * The Fortville NIC has limited resources for tunnel filters,
+ * so we can only reuse existing filters.
+ *
+ * In step 1 we define which Field Vector fields can be used for
+ * filter types.
+ * As we do not have the inner tag defined as a field,
+ * we have to define it first, by reusing one of the L1 entries.
+ *
+ * In step 2 we replace one of the existing filter types with
+ * a new one for QinQ.
+ * As we are reusing L1 and replacing L2, some of the default filter
+ * types will disappear, which depends on the L1 and L2 entries we reuse.
+ *
+ * Step 1: Create L1 filter of outer vlan (12b) + inner vlan (12b)
+ *
+ * 1. Create L1 filter of outer vlan (12b) which will be in use
+ * later when we define the cloud filter.
+ * a. Valid_flags.replace_cloud = 0
+ * b. Old_filter = 10 (Stag_Inner_Vlan)
+ * c. New_filter = 0x10
+ * d. TR bit = 0xff (optional, not used here)
+ * e. Buffer – 2 entries:
+ * i. Byte 0 = 8 (outer vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ * ii. Byte 0 = 37 (inner vlan FV index).
+ * Byte 1 = 0 (rsv)
+ * Byte 2-3 = 0x0fff
+ *
+ * Step 2:
+ * 2. Create a cloud filter using two L1 filter entries: stag and
+ * the new filter (outer vlan + inner vlan)
+ * a. Valid_flags.replace_cloud = 1
+ * b. Old_filter = 1 (instead of outer IP)
+ * c. New_filter = 0x10
+ * d. Buffer – 2 entries:
+ * i. Byte 0 = 0x80 | 7 (valid | Stag).
+ * Byte 1-3 = 0 (rsv)
+ * ii. Byte 8 = 0x80 | 0x10 (valid | new l1 filter step1)
+ * Byte 9-11 = 0 (rsv)
+ */
+static int
+i40e_cloud_filter_qinq_create(struct i40e_pf *pf)
+{
+ int ret = -ENOTSUP;
+ struct i40e_aqc_replace_cloud_filters_cmd filter_replace;
+ struct i40e_aqc_replace_cloud_filters_cmd_buf filter_replace_buf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+
+ /* Init */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L1 filter */
+ filter_replace.old_filter_type =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace.tr_bit = 0;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[2] = 0xff;
+ filter_replace_buf.data[3] = 0x0f;
+ filter_replace_buf.data[4] =
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ /* Field Vector 12b mask */
+ filter_replace_buf.data[6] = 0xff;
+ filter_replace_buf.data[7] = 0x0f;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ if (ret != I40E_SUCCESS)
+ return ret;
+
+ /* Apply the second L2 cloud filter */
+ memset(&filter_replace, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd));
+ memset(&filter_replace_buf, 0,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf));
+
+ /* create L2 filter, input for L2 filter will be L1 filter */
+ filter_replace.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
+ filter_replace.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ filter_replace.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+
+ /* Prepare the buffer, 2 entries */
+ filter_replace_buf.data[0] = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG;
+ filter_replace_buf.data[0] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ filter_replace_buf.data[4] = I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ;
+ filter_replace_buf.data[4] |=
+ I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED;
+ ret = i40e_aq_replace_cloud_filters(hw, &filter_replace,
+ &filter_replace_buf);
+ return ret;
+}
+
+RTE_INIT(i40e_init_log);
+static void
+i40e_init_log(void)
+{
+ i40e_logtype_init = rte_log_register("pmd.i40e.init");
+ if (i40e_logtype_init >= 0)
+ rte_log_set_level(i40e_logtype_init, RTE_LOG_NOTICE);
+ i40e_logtype_driver = rte_log_register("pmd.i40e.driver");
+ if (i40e_logtype_driver >= 0)
+ rte_log_set_level(i40e_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 5f3ecd9a..2ff8282f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -37,6 +37,8 @@
#include <rte_eth_ctrl.h>
#include <rte_time.h>
#include <rte_kvargs.h>
+#include <rte_hash.h>
+#include <rte_flow_driver.h>
#define I40E_VLAN_TAG_SIZE 4
@@ -126,6 +128,7 @@ enum i40e_flxpld_layer_idx {
#define I40E_FLAG_FDIR (1ULL << 6)
#define I40E_FLAG_VXLAN (1ULL << 7)
#define I40E_FLAG_RSS_AQ_CAPABLE (1ULL << 8)
+#define I40E_FLAG_VF_MAC_BY_PF (1ULL << 9)
#define I40E_FLAG_ALL (I40E_FLAG_RSS | \
I40E_FLAG_DCB | \
I40E_FLAG_VMDQ | \
@@ -134,7 +137,8 @@ enum i40e_flxpld_layer_idx {
I40E_FLAG_HEADER_SPLIT_ENABLED | \
I40E_FLAG_FDIR | \
I40E_FLAG_VXLAN | \
- I40E_FLAG_RSS_AQ_CAPABLE)
+ I40E_FLAG_RSS_AQ_CAPABLE | \
+ I40E_FLAG_VF_MAC_BY_PF)
#define I40E_RSS_OFFLOAD_ALL ( \
ETH_RSS_FRAG_IPV4 | \
@@ -188,6 +192,72 @@ enum i40e_flxpld_layer_idx {
#define FLOATING_VEB_SUPPORTED_FW_MAJ 5
#define FLOATING_VEB_SUPPORTED_FW_MIN 0
+#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4))
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16
+#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \
+ I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT)
+
+#define I40E_INSET_NONE 0x0000000000000000ULL
+
+/* bit0 ~ bit 7 */
+#define I40E_INSET_DMAC 0x0000000000000001ULL
+#define I40E_INSET_SMAC 0x0000000000000002ULL
+#define I40E_INSET_VLAN_OUTER 0x0000000000000004ULL
+#define I40E_INSET_VLAN_INNER 0x0000000000000008ULL
+#define I40E_INSET_VLAN_TUNNEL 0x0000000000000010ULL
+
+/* bit 8 ~ bit 15 */
+#define I40E_INSET_IPV4_SRC 0x0000000000000100ULL
+#define I40E_INSET_IPV4_DST 0x0000000000000200ULL
+#define I40E_INSET_IPV6_SRC 0x0000000000000400ULL
+#define I40E_INSET_IPV6_DST 0x0000000000000800ULL
+#define I40E_INSET_SRC_PORT 0x0000000000001000ULL
+#define I40E_INSET_DST_PORT 0x0000000000002000ULL
+#define I40E_INSET_SCTP_VT 0x0000000000004000ULL
+
+/* bit 16 ~ bit 31 */
+#define I40E_INSET_IPV4_TOS 0x0000000000010000ULL
+#define I40E_INSET_IPV4_PROTO 0x0000000000020000ULL
+#define I40E_INSET_IPV4_TTL 0x0000000000040000ULL
+#define I40E_INSET_IPV6_TC 0x0000000000080000ULL
+#define I40E_INSET_IPV6_FLOW 0x0000000000100000ULL
+#define I40E_INSET_IPV6_NEXT_HDR 0x0000000000200000ULL
+#define I40E_INSET_IPV6_HOP_LIMIT 0x0000000000400000ULL
+#define I40E_INSET_TCP_FLAGS 0x0000000000800000ULL
+
+/* bit 32 ~ bit 47, tunnel fields */
+#define I40E_INSET_TUNNEL_IPV4_DST 0x0000000100000000ULL
+#define I40E_INSET_TUNNEL_IPV6_DST 0x0000000200000000ULL
+#define I40E_INSET_TUNNEL_DMAC 0x0000000400000000ULL
+#define I40E_INSET_TUNNEL_SRC_PORT 0x0000000800000000ULL
+#define I40E_INSET_TUNNEL_DST_PORT 0x0000001000000000ULL
+#define I40E_INSET_TUNNEL_ID 0x0000002000000000ULL
+
+/* bit 48 ~ bit 55 */
+#define I40E_INSET_LAST_ETHER_TYPE 0x0001000000000000ULL
+
+/* bit 56 ~ bit 63, Flex Payload */
+#define I40E_INSET_FLEX_PAYLOAD_W1 0x0100000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W2 0x0200000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W3 0x0400000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W4 0x0800000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W5 0x1000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W6 0x2000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W7 0x4000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD_W8 0x8000000000000000ULL
+#define I40E_INSET_FLEX_PAYLOAD \
+ (I40E_INSET_FLEX_PAYLOAD_W1 | I40E_INSET_FLEX_PAYLOAD_W2 | \
+ I40E_INSET_FLEX_PAYLOAD_W3 | I40E_INSET_FLEX_PAYLOAD_W4 | \
+ I40E_INSET_FLEX_PAYLOAD_W5 | I40E_INSET_FLEX_PAYLOAD_W6 | \
+ I40E_INSET_FLEX_PAYLOAD_W7 | I40E_INSET_FLEX_PAYLOAD_W8)
+
+/**
+ * The overhead from MTU to max frame size.
+ * To allow for QinQ packets, the VLAN tag is counted twice.
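+ * For example, with a 14 byte Ethernet header, 4 byte CRC and two
+ * 4 byte VLAN tags, an MTU of 1500 gives a max frame size of
+ * 1500 + 14 + 4 + 8 = 1526 bytes.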
+ */
+#define I40E_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE * 2)
+
struct i40e_adapter;
/**
@@ -242,6 +312,7 @@ struct i40e_veb {
uint16_t stats_idx;
struct i40e_eth_stats stats;
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t strict_prio_tc; /* bit map of TCs set to strict priority mode */
struct i40e_bw_info bw_info; /* VEB bandwidth information */
};
@@ -300,6 +371,8 @@ struct i40e_vsi {
uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
uint16_t nb_msix; /* The max number of msix vector */
uint8_t enabled_tc; /* The traffic class enabled */
+ uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
+ uint8_t vlan_filter_on; /* The VLAN filter enabled */
struct i40e_bw_info bw_info; /* VSI bandwidth information */
};
@@ -376,6 +449,14 @@ struct i40e_fdir_flex_mask {
};
#define I40E_FILTER_PCTYPE_MAX 64
+#define I40E_MAX_FDIR_FILTER_NUM (1024 * 8)
+
+struct i40e_fdir_filter {
+ TAILQ_ENTRY(i40e_fdir_filter) rules;
+ struct rte_eth_fdir_filter fdir;
+};
+
+TAILQ_HEAD(i40e_fdir_filter_list, i40e_fdir_filter);
/*
* A structure used to define fields of a FDIR related info.
*/
@@ -394,6 +475,122 @@ struct i40e_fdir_info {
*/
struct i40e_fdir_flex_pit flex_set[I40E_MAX_FLXPLD_LAYER * I40E_MAX_FLXPLD_FIED];
struct i40e_fdir_flex_mask flex_mask[I40E_FILTER_PCTYPE_MAX];
+
+ struct i40e_fdir_filter_list fdir_list;
+ struct i40e_fdir_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Ethertype filter number HW supports */
+#define I40E_MAX_ETHERTYPE_FILTER_NUM 768
+
+/* Ethertype filter struct */
+struct i40e_ethertype_filter_input {
+ struct ether_addr mac_addr; /* Mac address to match */
+ uint16_t ether_type; /* Ether type to match */
+};
+
+struct i40e_ethertype_filter {
+ TAILQ_ENTRY(i40e_ethertype_filter) rules;
+ struct i40e_ethertype_filter_input input;
+ uint16_t flags; /* Flags from RTE_ETHTYPE_FLAGS_* */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_ethertype_filter_list, i40e_ethertype_filter);
+
+struct i40e_ethertype_rule {
+ struct i40e_ethertype_filter_list ethertype_list;
+ struct i40e_ethertype_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/* Tunnel filter number HW supports */
+#define I40E_MAX_TUNNEL_FILTER_NUM 400
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD0 44
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TEID_WORD1 45
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoUDP 8
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MPLSoGRE 9
+#define I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ 0x10
+#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP 0x11
+#define I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE 0x12
+#define I40E_AQC_ADD_L1_FILTER_TEID_MPLS 0x11
+
+enum i40e_tunnel_iptype {
+ I40E_TUNNEL_IPTYPE_IPV4,
+ I40E_TUNNEL_IPTYPE_IPV6,
+};
+
+/* Tunnel filter struct */
+struct i40e_tunnel_filter_input {
+ uint8_t outer_mac[6]; /* Outer mac address to match */
+ uint8_t inner_mac[6]; /* Inner mac address to match */
+ uint16_t inner_vlan; /* Inner VLAN ID to match */
+ enum i40e_tunnel_iptype ip_type;
+ uint16_t flags; /* Filter type flag */
+ uint32_t tenant_id; /* Tenant id to match */
+ uint16_t general_fields[32]; /* Big buffer */
+};
+
+struct i40e_tunnel_filter {
+ TAILQ_ENTRY(i40e_tunnel_filter) rules;
+ struct i40e_tunnel_filter_input input;
+ uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
+ uint16_t vf_id; /* VF id, available when is_to_vf is 1. */
+ uint16_t queue; /* Queue assigned to when match */
+};
+
+TAILQ_HEAD(i40e_tunnel_filter_list, i40e_tunnel_filter);
+
+struct i40e_tunnel_rule {
+ struct i40e_tunnel_filter_list tunnel_list;
+ struct i40e_tunnel_filter **hash_map;
+ struct rte_hash *hash_table;
+};
+
+/**
+ * Tunnel type.
+ */
+enum i40e_tunnel_type {
+ I40E_TUNNEL_TYPE_NONE = 0,
+ I40E_TUNNEL_TYPE_VXLAN,
+ I40E_TUNNEL_TYPE_GENEVE,
+ I40E_TUNNEL_TYPE_TEREDO,
+ I40E_TUNNEL_TYPE_NVGRE,
+ I40E_TUNNEL_TYPE_IP_IN_GRE,
+ I40E_L2_TUNNEL_TYPE_E_TAG,
+ I40E_TUNNEL_TYPE_MPLSoUDP,
+ I40E_TUNNEL_TYPE_MPLSoGRE,
+ I40E_TUNNEL_TYPE_QINQ,
+ I40E_TUNNEL_TYPE_MAX,
+};
+
+/**
+ * Tunneling Packet filter configuration.
+ */
+struct i40e_tunnel_filter_conf {
+ struct ether_addr outer_mac; /**< Outer MAC address to match. */
+ struct ether_addr inner_mac; /**< Inner MAC address to match. */
+ uint16_t inner_vlan; /**< Inner VLAN to match. */
+ uint32_t outer_vlan; /**< Outer VLAN to match */
+ enum i40e_tunnel_iptype ip_type; /**< IP address type. */
+ /**
+ * Outer destination IP address to match if ETH_TUNNEL_FILTER_OIP
+ * is set in filter_type, or inner destination IP address to match
+ * if ETH_TUNNEL_FILTER_IIP is set in filter_type.
+ */
+ union {
+ uint32_t ipv4_addr; /**< IPv4 address in big endian. */
+ uint32_t ipv6_addr[4]; /**< IPv6 address in big endian. */
+ } ip_addr;
+ /** Flags from ETH_TUNNEL_FILTER_XX - see above. */
+ uint16_t filter_type;
+ enum i40e_tunnel_type tunnel_type; /**< Tunnel Type. */
+ uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */
+ uint16_t queue_id; /**< Queue assigned to if match. */
+ uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */
+ uint16_t vf_id; /**< VF id, available when is_to_vf is 1. */
};
#define I40E_MIRROR_MAX_ENTRIES_PER_RULE 64
@@ -418,6 +615,17 @@ struct i40e_mirror_rule {
TAILQ_HEAD(i40e_mirror_rule_list, i40e_mirror_rule);
/*
+ * Struct to store flow created.
+ */
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+
+TAILQ_HEAD(i40e_flow_list, rte_flow);
+
+/*
* Structure to store private data specific for PF instance.
*/
struct i40e_pf {
@@ -466,12 +674,17 @@ struct i40e_pf {
struct i40e_vmdq_info *vmdq;
struct i40e_fdir_info fdir; /* flow director info */
+ struct i40e_ethertype_rule ethertype; /* Ethertype filter rule */
+ struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
uint16_t nb_mirror_rule; /* The number of mirror rules */
bool floating_veb; /* The flag to use the floating VEB */
/* The floating enable flag for the specific VF */
bool floating_veb_list[I40E_MAX_VF];
+ struct i40e_flow_list flow_list;
+ bool mpls_replace_flag; /* 1 - MPLS filter replace is done */
+ bool qinq_replace_flag; /* QINQ filter replace is done */
};
enum pending_msg {
@@ -538,6 +751,8 @@ struct i40e_vf {
uint64_t flags;
};
+#define I40E_MAX_PKT_TYPE 256
+
/*
* Structure to store private data for each PF/VF instance.
*/
@@ -562,6 +777,29 @@ struct i40e_adapter {
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
struct rte_timecounter tx_tstamp_tc;
+
+ /* ptype mapping table */
+ uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
+};
+
+extern const struct rte_flow_ops i40e_flow_ops;
+
+union i40e_filter_t {
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_fdir_filter fdir_filter;
+ struct rte_eth_tunnel_filter_conf tunnel_filter;
+ struct i40e_tunnel_filter_conf consistent_tunnel_filter;
+};
+
+typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+struct i40e_valid_pattern {
+ enum rte_flow_item_type *items;
+ parse_filter_t parse_filter;
};
int i40e_dev_switch_queues(struct i40e_pf *pf, bool on);
@@ -605,6 +843,7 @@ int i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
int i40e_select_filter_input_set(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf,
enum rte_filter_type filter);
+void i40e_fdir_filter_restore(struct i40e_pf *pf);
int i40e_hash_filter_inset_select(struct i40e_hw *hw,
struct rte_eth_input_set_conf *conf);
int i40e_fdir_filter_inset_select(struct i40e_pf *pf,
@@ -616,6 +855,46 @@ void i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_txq_info *qinfo);
+struct i40e_ethertype_filter *
+i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
+ const struct i40e_ethertype_filter_input *input);
+int i40e_sw_ethertype_filter_del(struct i40e_pf *pf,
+ struct i40e_ethertype_filter_input *input);
+int i40e_sw_fdir_filter_del(struct i40e_pf *pf,
+ struct rte_eth_fdir_input *input);
+struct i40e_tunnel_filter *
+i40e_sw_tunnel_filter_lookup(struct i40e_tunnel_rule *tunnel_rule,
+ const struct i40e_tunnel_filter_input *input);
+int i40e_sw_tunnel_filter_del(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_input *input);
+uint64_t i40e_get_default_input_set(uint16_t pctype);
+int i40e_ethertype_filter_set(struct i40e_pf *pf,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *filter,
+ bool add);
+int i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
+ struct rte_eth_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
+ struct i40e_tunnel_filter_conf *tunnel_filter,
+ uint8_t add);
+int i40e_fdir_flush(struct rte_eth_dev *dev);
+int i40e_find_all_vlan_for_mac(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *mv_f,
+ int num, struct ether_addr *addr);
+int i40e_remove_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+void i40e_set_vlan_filter(struct i40e_vsi *vsi, uint16_t vlan_id, bool on);
+int i40e_add_macvlan_filters(struct i40e_vsi *vsi,
+ struct i40e_macvlan_filter *filter,
+ int total);
+bool is_i40e_supported(struct rte_eth_dev *dev);
+
+#define I40E_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
/* I40E_DEV_PRIVATE_TO */
#define I40E_DEV_PRIVATE_TO_PF(adapter) \
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index 640d316a..859b5e8f 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -55,6 +55,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
@@ -135,10 +136,10 @@ static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
uint16_t tx_queue_id);
-static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *addr,
- uint32_t index,
- uint32_t pool);
+static int i40evf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr,
+ uint32_t index,
+ uint32_t pool);
static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -151,6 +152,9 @@ static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
+static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
+static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr);
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
@@ -214,6 +218,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
.rx_descriptor_done = i40e_dev_rx_descriptor_done,
+ .rx_descriptor_status = i40e_dev_rx_descriptor_status,
+ .tx_descriptor_status = i40e_dev_tx_descriptor_status,
.tx_queue_setup = i40e_dev_tx_queue_setup,
.tx_queue_release = i40e_dev_tx_queue_release,
.rx_queue_count = i40e_dev_rx_queue_count,
@@ -225,6 +231,8 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = {
.reta_query = i40evf_dev_rss_reta_query,
.rss_hash_update = i40evf_dev_rss_hash_update,
.rss_hash_conf_get = i40evf_dev_rss_hash_conf_get,
+ .mtu_set = i40evf_dev_mtu_set,
+ .mac_addr_set = i40evf_set_default_mac_addr,
};
/*
@@ -640,7 +648,7 @@ i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
return ret;
}
@@ -693,7 +701,7 @@ i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
ret = i40evf_execute_vf_cmd(dev, &args);
if (ret)
PMD_DRV_LOG(ERR, "Failed to execute command of "
- "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT\n");
+ "I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
return ret;
}
@@ -719,7 +727,8 @@ i40evf_config_irq_map(struct rte_eth_dev *dev)
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
sizeof(struct i40e_virtchnl_vector_map)];
struct i40e_virtchnl_irq_map_info *map_info;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t vector_id;
int i, err;
@@ -846,7 +855,7 @@ i40evf_stop_queues(struct rte_eth_dev *dev)
return 0;
}
-static void
+static int
i40evf_add_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *addr,
__rte_unused uint32_t index,
@@ -864,7 +873,7 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
addr->addr_bytes[0], addr->addr_bytes[1],
addr->addr_bytes[2], addr->addr_bytes[3],
addr->addr_bytes[4], addr->addr_bytes[5]);
- return;
+ return I40E_ERR_INVALID_MAC_ADDR;
}
list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
@@ -883,23 +892,20 @@ i40evf_add_mac_addr(struct rte_eth_dev *dev,
PMD_DRV_LOG(ERR, "fail to execute command "
"OP_ADD_ETHER_ADDRESS");
- return;
+ return err;
}
static void
-i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
+ struct ether_addr *addr)
{
struct i40e_virtchnl_ether_addr_list *list;
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct rte_eth_dev_data *data = dev->data;
- struct ether_addr *addr;
uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
sizeof(struct i40e_virtchnl_ether_addr)];
int err;
struct vf_cmd_info args;
- addr = &(data->mac_addrs[index]);
-
if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
addr->addr_bytes[0], addr->addr_bytes[1],
@@ -926,6 +932,17 @@ i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
return;
}
+static void
+i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct ether_addr *addr;
+
+ addr = &data->mac_addrs[index];
+
+ i40evf_del_mac_addr_by_addr(dev, addr);
+}
+
static int
i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
{
@@ -953,7 +970,7 @@ i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
}
static int
-i40evf_get_statics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
int ret;
struct i40e_eth_stats *pstats = NULL;
@@ -1088,7 +1105,6 @@ static const struct rte_pci_id pci_id_i40evf_map[] = {
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
- { RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV) },
{ .vendor_id = 0, /* sentinel */ },
};
@@ -1182,7 +1198,6 @@ i40evf_init_vf(struct rte_eth_dev *dev)
int i, err, bufsz;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
- struct ether_addr *p_mac_addr;
uint16_t interval =
i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
@@ -1259,9 +1274,8 @@ i40evf_init_vf(struct rte_eth_dev *dev)
vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
/* Store the MAC address configured by host, or generate random one */
- p_mac_addr = (struct ether_addr *)(vf->vsi_res->default_mac_addr);
- if (is_valid_assigned_ether_addr(p_mac_addr)) /* Configured by host */
- ether_addr_copy(p_mac_addr, (struct ether_addr *)hw->mac.addr);
+ if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
else
eth_random_addr(hw->mac.addr); /* Generate a random one */
@@ -1314,16 +1328,16 @@ i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
switch (pf_msg->event) {
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
break;
case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
vf->link_up = pf_msg->event_data.link_event.link_status;
vf->link_speed = pf_msg->event_data.link_event.link_speed;
break;
case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
- PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event\n");
+ PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
break;
default:
PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
@@ -1385,7 +1399,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
"expect %u, get %u",
vf->pend_cmd, msg_opc);
PMD_DRV_LOG(DEBUG, "adminq response is received,"
- " opcode = %d\n", msg_opc);
+ " opcode = %d", msg_opc);
}
break;
default:
@@ -1409,8 +1423,7 @@ i40evf_handle_aq_msg(struct rte_eth_dev *dev)
* void
*/
static void
-i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+i40evf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -1423,31 +1436,31 @@ i40evf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
/* No interrupt event indicated */
if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
- PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do\n");
+ PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
goto done;
}
if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
- PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported\n");
+ PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
i40evf_handle_aq_msg(dev);
}
/* Link Status Change interrupt */
if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
- " do nothing\n");
+ " do nothing");
done:
i40evf_enable_irq0(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(dev->intr_handle);
}
static int
i40evf_dev_init(struct rte_eth_dev *eth_dev)
{
- struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(\
- eth_dev->data->dev_private);
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct i40e_hw *hw
+ = I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
PMD_INIT_FUNC_TRACE();
@@ -1465,16 +1478,17 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
i40e_set_tx_function(eth_dev);
return 0;
}
-
- rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev);
-
- hw->vendor_id = eth_dev->pci_dev->id.vendor_id;
- hw->device_id = eth_dev->pci_dev->id.device_id;
- hw->subsystem_vendor_id = eth_dev->pci_dev->id.subsystem_vendor_id;
- hw->subsystem_device_id = eth_dev->pci_dev->id.subsystem_device_id;
- hw->bus.device = eth_dev->pci_dev->addr.devid;
- hw->bus.func = eth_dev->pci_dev->addr.function;
- hw->hw_addr = (void *)eth_dev->pci_dev->mem_resource[0].addr;
+ i40e_set_default_ptype_table(eth_dev);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
hw->adapter_stopped = 0;
if(i40evf_init_vf(eth_dev) != 0) {
@@ -1530,23 +1544,32 @@ i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
+
+static int eth_i40evf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct i40e_adapter), i40evf_dev_init);
+}
+
+static int eth_i40evf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, i40evf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_i40evf_pmd = {
- .pci_drv = {
- .id_table = pci_id_i40evf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = i40evf_dev_init,
- .eth_dev_uninit = i40evf_dev_uninit,
- .dev_private_size = sizeof(struct i40e_adapter),
+static struct rte_pci_driver rte_i40evf_pmd = {
+ .id_table = pci_id_i40evf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_i40evf_pci_probe,
+ .remove = eth_i40evf_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
static int
i40evf_dev_configure(struct rte_eth_dev *dev)
@@ -1861,7 +1884,8 @@ i40evf_enable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw,
@@ -1893,7 +1917,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
if (!rte_intr_allow_others(intr_handle)) {
I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
@@ -1919,7 +1944,8 @@ i40evf_disable_queues_intr(struct rte_eth_dev *dev)
static int
i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t interval =
i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
@@ -1945,7 +1971,7 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
I40EVF_WRITE_FLUSH(hw);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
return 0;
}
@@ -1953,7 +1979,8 @@ i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint16_t msix_intr;
@@ -1997,6 +2024,10 @@ i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
}
list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
+ if (!list) {
+ PMD_DRV_LOG(ERR, "fail to allocate memory");
+ return;
+ }
for (i = begin; i < next_begin; i++) {
addr = &dev->data->mac_addrs[i];
@@ -2033,7 +2064,8 @@ i40evf_dev_start(struct rte_eth_dev *dev)
{
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
PMD_INIT_FUNC_TRACE();
@@ -2057,7 +2089,7 @@ i40evf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (!intr_handle->intr_vec) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2098,7 +2130,8 @@ err_queue:
static void
i40evf_dev_stop(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -2142,6 +2175,9 @@ i40evf_dev_link_update(struct rte_eth_dev *dev,
case I40E_LINK_SPEED_20GB:
new_link.link_speed = ETH_SPEED_NUM_20G;
break;
+ case I40E_LINK_SPEED_25GB:
+ new_link.link_speed = ETH_SPEED_NUM_25G;
+ break;
case I40E_LINK_SPEED_40GB:
new_link.link_speed = ETH_SPEED_NUM_40G;
break;
@@ -2225,6 +2261,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
memset(dev_info, 0, sizeof(*dev_info));
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
@@ -2285,15 +2322,16 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
static void
i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
- if (i40evf_get_statics(dev, stats))
- PMD_DRV_LOG(ERR, "Get statics failed");
+ if (i40evf_get_statistics(dev, stats))
+ PMD_DRV_LOG(ERR, "Get statistics failed");
}
static void
i40evf_dev_close(struct rte_eth_dev *dev)
{
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_pci_device *pci_dev = dev->pci_dev;
+ struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
i40evf_dev_stop(dev);
hw->adapter_stopped = 1;
@@ -2301,11 +2339,11 @@ i40evf_dev_close(struct rte_eth_dev *dev)
i40evf_reset_vf(hw);
i40e_shutdown_adminq(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_disable(intr_handle);
/* unregister callback func from eal lib */
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- i40evf_dev_interrupt_handler, (void *)dev);
+ rte_intr_callback_unregister(intr_handle,
+ i40evf_dev_interrupt_handler, dev);
i40evf_disable_irq0(hw);
}
@@ -2382,7 +2420,7 @@ i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2421,7 +2459,7 @@ i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != ETH_RSS_RETA_SIZE_64) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number of hardware can "
- "support (%d)\n", reta_size, ETH_RSS_RETA_SIZE_64);
+ "support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
return -EINVAL;
}
@@ -2566,7 +2604,7 @@ i40evf_config_rss(struct i40e_vf *vf)
if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "RSS not configured\n");
+ PMD_DRV_LOG(DEBUG, "RSS not configured");
return 0;
}
@@ -2583,7 +2621,7 @@ i40evf_config_rss(struct i40e_vf *vf)
rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
i40evf_disable_rss(vf);
- PMD_DRV_LOG(DEBUG, "No hash flag is set\n");
+ PMD_DRV_LOG(DEBUG, "No hash flag is set");
return 0;
}
@@ -2643,3 +2681,54 @@ i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
return 0;
}
+
+static int
+i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct rte_eth_dev_data *dev_data = vf->dev_data;
+ uint32_t frame_size = mtu + I40E_ETH_OVERHEAD;
+ int ret = 0;
+
+ /* check if mtu is within the allowed range */
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
+ return -EINVAL;
+
+ /* MTU setting is forbidden if the port is started */
+ if (dev_data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev_data->port_id);
+ return -EBUSY;
+ }
+
+ if (frame_size > ETHER_MAX_LEN)
+ dev_data->dev_conf.rxmode.jumbo_frame = 1;
+ else
+ dev_data->dev_conf.rxmode.jumbo_frame = 0;
+
+ dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+ return ret;
+}
+
+static void
+i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ if (!is_valid_assigned_ether_addr(mac_addr)) {
+ PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
+ return;
+ }
+
+ if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
+ return;
+
+ if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
+ return;
+
+ i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
+
+ i40evf_add_mac_addr(dev, mac_addr, 0, 0);
+}
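
The two new VF ops above are reached through the generic ethdev API. Below is a minimal usage sketch, illustrative only and not part of this patch; the port id, MTU value and MAC address are assumptions, and the port must be stopped first because i40evf_dev_mtu_set() rejects a started port:

#include <rte_ethdev.h>
#include <rte_ether.h>

static int
reconfigure_vf_port(uint8_t port_id)
{
	/* Locally administered address, purely illustrative */
	struct ether_addr new_mac = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};
	int ret;

	rte_eth_dev_stop(port_id);	/* mtu_set returns -EBUSY on a started port */

	ret = rte_eth_dev_set_mtu(port_id, 1500);	/* dispatched to i40evf_dev_mtu_set() */
	if (ret != 0)
		return ret;

	/* Dispatched to i40evf_set_default_mac_addr(); it is a no-op
	 * when the MAC was assigned by the PF (I40E_FLAG_VF_MAC_BY_PF).
	 */
	rte_eth_dev_default_mac_addr_set(port_id, &new_mac);

	return rte_eth_dev_start(port_id);
}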
diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 335bf15c..28cc554f 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -119,7 +119,13 @@ static int i40e_fdir_filter_programming(struct i40e_pf *pf,
enum i40e_filter_pctype pctype,
const struct rte_eth_fdir_filter *filter,
bool add);
-static int i40e_fdir_flush(struct rte_eth_dev *dev);
+static int i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter);
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input);
+static int i40e_sw_fdir_filter_insert(struct i40e_pf *pf,
+ struct i40e_fdir_filter *filter);
static int
i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
@@ -251,7 +257,7 @@ i40e_fdir_setup(struct i40e_pf *pf)
/* reserve memory for the fdir programming packet */
snprintf(z_name, sizeof(z_name), "%s_%s_%d",
- eth_dev->driver->pci_drv.driver.name,
+ eth_dev->data->drv_name,
I40E_FDIR_MZ_NAME,
eth_dev->data->port_id);
mz = i40e_memzone_reserve(z_name, I40E_FDIR_PKT_LEN, SOCKET_ID_ANY);
@@ -1017,13 +1023,81 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
return ret;
}
+static int
+i40e_fdir_filter_convert(const struct rte_eth_fdir_filter *input,
+ struct i40e_fdir_filter *filter)
+{
+ rte_memcpy(&filter->fdir, input, sizeof(struct rte_eth_fdir_filter));
+ return 0;
+}
+
+/* Check if the flow director filter already exists */
+static struct i40e_fdir_filter *
+i40e_sw_fdir_filter_lookup(struct i40e_fdir_info *fdir_info,
+ const struct rte_eth_fdir_input *input)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_table, (const void *)input);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+/* Add a flow director filter into the SW list */
+static int
+i40e_sw_fdir_filter_insert(struct i40e_pf *pf, struct i40e_fdir_filter *filter)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_table,
+ &filter->fdir.input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ fdir_info->hash_map[ret] = filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, filter, rules);
+
+ return 0;
+}
+
+/* Delete a flow director filter from the SW list */
+int
+i40e_sw_fdir_filter_del(struct i40e_pf *pf, struct rte_eth_fdir_input *input)
+{
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *filter;
+ int ret;
+
+ ret = rte_hash_del_key(fdir_info->hash_table, input);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to delete fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+ filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, filter, rules);
+ rte_free(filter);
+
+ return 0;
+}
+
/*
* i40e_add_del_fdir_filter - add or remove a flow director filter.
* @pf: board private structure
* @filter: fdir filter entry
* @add: 0 - delete, 1 - add
*/
-static int
+int
i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
const struct rte_eth_fdir_filter *filter,
bool add)
@@ -1032,6 +1106,9 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
enum i40e_filter_pctype pctype;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter, *node;
+ struct i40e_fdir_filter check_filter; /* Check if the filter exists */
int ret = 0;
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
@@ -1054,6 +1131,22 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
return -EINVAL;
}
+ /* Check if the filter already exists in the SW list */
+ memset(&check_filter, 0, sizeof(check_filter));
+ i40e_fdir_filter_convert(filter, &check_filter);
+ node = i40e_sw_fdir_filter_lookup(fdir_info, &check_filter.fdir.input);
+ if (add && node) {
+ PMD_DRV_LOG(ERR,
+ "Conflict with existing flow director rules!");
+ return -EINVAL;
+ }
+
+ if (!add && !node) {
+ PMD_DRV_LOG(ERR,
+ "There's no corresponding flow firector filter!");
+ return -EINVAL;
+ }
+
memset(pkt, 0, I40E_FDIR_PKT_LEN);
ret = i40e_fdir_construct_pkt(pf, &filter->input, pkt);
@@ -1077,6 +1170,16 @@ i40e_add_del_fdir_filter(struct rte_eth_dev *dev,
pctype);
return ret;
}
+
+ if (add) {
+ fdir_filter = rte_zmalloc("fdir_filter",
+ sizeof(*fdir_filter), 0);
+ rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
+ ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+ } else {
+ ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
+ }
+
return ret;
}
@@ -1220,7 +1323,7 @@ i40e_fdir_filter_programming(struct i40e_pf *pf,
* i40e_fdir_flush - clear all filters of Flow Director table
* @pf: board private structure
*/
-static int
+int
i40e_fdir_flush(struct rte_eth_dev *dev)
{
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
@@ -1481,3 +1584,30 @@ i40e_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* Restore flow director filter */
+void
+i40e_fdir_filter_restore(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = I40E_VSI_TO_ETH_DEV(pf->main_vsi);
+ struct i40e_fdir_filter_list *fdir_list = &pf->fdir.fdir_list;
+ struct i40e_fdir_filter *f;
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ uint32_t fdstat;
+ uint32_t guarant_cnt; /**< Number of filters in guaranteed spaces. */
+ uint32_t best_cnt; /**< Number of filters in best effort spaces. */
+
+ TAILQ_FOREACH(f, fdir_list, rules)
+ i40e_add_del_fdir_filter(dev, &f->fdir, TRUE);
+
+ fdstat = I40E_READ_REG(hw, I40E_PFQF_FDSTAT);
+ guarant_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT);
+ best_cnt =
+ (uint32_t)((fdstat & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
+ I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
+
+ PMD_DRV_LOG(INFO, "FDIR: Guarant count: %d, Best count: %d",
+ guarant_cnt, best_cnt);
+}
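
For reference, the SW list added in this file follows a common rte_hash bookkeeping pattern: the non-negative position returned by rte_hash_add_key()/rte_hash_lookup() doubles as an index into a shadow array of entry pointers. A stand-alone sketch of that pattern, with illustrative names (sw_list_insert, entry_map, MAX_FDIR_ENTRIES) that are not part of the driver:

#include <rte_hash.h>

#define MAX_FDIR_ENTRIES 1024	/* must match the 'entries' used at rte_hash_create() time */

struct entry { int dummy; };

static struct entry *entry_map[MAX_FDIR_ENTRIES];

static int
sw_list_insert(struct rte_hash *h, const void *key, struct entry *e)
{
	int pos = rte_hash_add_key(h, key);	/* key position on success, <0 on error */

	if (pos < 0)
		return pos;
	entry_map[pos] = e;			/* shadow array mirrors the hash slot */
	return 0;
}

static struct entry *
sw_list_lookup(struct rte_hash *h, const void *key)
{
	int pos = rte_hash_lookup(h, key);

	return pos < 0 ? NULL : entry_map[pos];
}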
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
new file mode 100644
index 00000000..24e1c658
--- /dev/null
+++ b/drivers/net/i40e/i40e_flow.c
@@ -0,0 +1,2258 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_log.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_eth_ctrl.h>
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "i40e_logs.h"
+#include "base/i40e_type.h"
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+
+#define I40E_IPV4_TC_SHIFT 4
+#define I40E_IPV6_TC_MASK (0x00FF << I40E_IPV4_TC_SHIFT)
+#define I40E_IPV6_FRAG_HEADER 44
+#define I40E_TENANT_ARRAY_NUM 3
+#define I40E_TCI_MASK 0xFFFF
+
+static int i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+static int i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+static int i40e_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter);
+static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter);
+static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error);
+static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter);
+static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter);
+static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
+static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter);
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter);
+
+const struct rte_flow_ops i40e_flow_ops = {
+ .validate = i40e_flow_validate,
+ .create = i40e_flow_create,
+ .destroy = i40e_flow_destroy,
+ .flush = i40e_flow_flush,
+};
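
These callbacks back the generic rte_flow API. As a hedged sketch (not taken from this patch) of a rule this file can parse, an application could install an ethertype rule steering ARP frames to Rx queue 1, assuming port_id is a configured i40e port with at least two Rx queues:

#include <rte_flow.h>
#include <rte_ether.h>
#include <rte_byteorder.h>

static struct rte_flow *
steer_arp_to_queue1(uint8_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* src mask all-zero, dst mask all-zero, full ether_type mask,
	 * as required by i40e_flow_parse_ethertype_pattern().
	 */
	struct rte_flow_item_eth eth_spec = { .type = rte_cpu_to_be_16(ETHER_TYPE_ARP) };
	struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}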
+
+union i40e_filter_t cons_filter;
+enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
+
+/* Pattern matched ethertype filter */
+static enum rte_flow_item_type pattern_ethertype[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched flow director filter */
+static enum rte_flow_item_type pattern_fdir_ipv4[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_TCP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_SCTP,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_VXLAN,
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched MPLS */
+static enum rte_flow_item_type pattern_mpls_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_2[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_3[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_mpls_4[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_IPV6,
+ RTE_FLOW_ITEM_TYPE_GRE,
+ RTE_FLOW_ITEM_TYPE_MPLS,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+/* Pattern matched QINQ */
+static enum rte_flow_item_type pattern_qinq_1[] = {
+ RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_END,
+};
+
+static struct i40e_valid_pattern i40e_supported_patterns[] = {
+ /* Ethertype */
+ { pattern_ethertype, i40e_flow_parse_ethertype_filter },
+ /* FDIR */
+ { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
+ { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
+ /* VXLAN */
+ { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
+ { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
+ /* MPLSoUDP & MPLSoGRE */
+ { pattern_mpls_1, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_2, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_3, i40e_flow_parse_mpls_filter },
+ { pattern_mpls_4, i40e_flow_parse_mpls_filter },
+ /* QINQ */
+ { pattern_qinq_1, i40e_flow_parse_qinq_filter },
+};
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index) \
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
+/* Find the first VOID or non-VOID item pointer */
+static const struct rte_flow_item *
+i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
+{
+ bool is_find;
+
+ while (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ if (is_void)
+ is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
+ else
+ is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
+ if (is_find)
+ break;
+ item++;
+ }
+ return item;
+}
+
+/* Skip all VOID items of the pattern */
+static void
+i40e_pattern_skip_void_item(struct rte_flow_item *items,
+ const struct rte_flow_item *pattern)
+{
+ uint32_t cpy_count = 0;
+ const struct rte_flow_item *pb = pattern, *pe = pattern;
+
+ for (;;) {
+ /* Find a non-void item first */
+ pb = i40e_find_first_item(pb, false);
+ if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
+ pe = pb;
+ break;
+ }
+
+ /* Find a void item */
+ pe = i40e_find_first_item(pb + 1, true);
+
+ cpy_count = pe - pb;
+ rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
+
+ items += cpy_count;
+
+ if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
+ pb = pe;
+ break;
+ }
+
+ pb = pe + 1;
+ }
+ /* Copy the END item. */
+ rte_memcpy(items, pe, sizeof(struct rte_flow_item));
+}
+
+/* Check if the pattern matches a supported item type array */
+static bool
+i40e_match_pattern(enum rte_flow_item_type *item_array,
+ struct rte_flow_item *pattern)
+{
+ struct rte_flow_item *item = pattern;
+
+ while ((*item_array == item->type) &&
+ (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
+ item_array++;
+ item++;
+ }
+
+ return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
+ item->type == RTE_FLOW_ITEM_TYPE_END);
+}
+
+/* Find a parse filter function matching the pattern, if any */
+static parse_filter_t
+i40e_find_parse_filter_func(struct rte_flow_item *pattern)
+{
+ parse_filter_t parse_filter = NULL;
+ uint8_t i = 0;
+
+ for (; i < RTE_DIM(i40e_supported_patterns); i++) {
+ if (i40e_match_pattern(i40e_supported_patterns[i].items,
+ pattern)) {
+ parse_filter = i40e_supported_patterns[i].parse_filter;
+ break;
+ }
+ }
+
+ return parse_filter;
+}
+
+/* Parse attributes */
+static int
+i40e_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static uint16_t
+i40e_get_outer_vlan(struct rte_eth_dev *dev)
+{
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+ uint64_t reg_r = 0;
+ uint16_t reg_id;
+ uint16_t tpid;
+
+ if (qinq)
+ reg_id = 2;
+ else
+ reg_id = 3;
+
+ i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+
+ tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
+
+ return tpid;
+}
+
+/* 1. The 'last' member in pattern items should be NULL as ranges are not supported.
+ * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
+ * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
+ * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
+ * FF:FF:FF:FF:FF:FF
+ * 5. Ether_type mask should be 0xFFFF.
+ */
+static int
+i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ enum rte_flow_item_type item_type;
+ uint16_t outer_tpid;
+
+ outer_tpid = i40e_get_outer_vlan(dev);
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ /* Get the MAC info. */
+ if (!eth_spec || !eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL ETH spec/mask");
+ return -rte_errno;
+ }
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MAC_addr mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6 ||
+ filter->ether_type == ETHER_TYPE_LLDP ||
+ filter->ether_type == outer_tpid) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type in"
+ " control packet filter.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/* Ethertype action only supports QUEUE or DROP. */
+static int
+i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for"
+ " ethertype_filter.");
+ return -rte_errno;
+ }
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_ethertype_filter *ethertype_filter =
+ &filter->ethertype_filter;
+ int ret;
+
+ ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_ethertype_action(dev, actions, error,
+ ethertype_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
+
+ return ret;
+}
+
+/* 1. The 'last' member in pattern items should be NULL as ranges are not supported.
+ * 2. Supported flow type and input set: refer to array
+ * default_inset_table in i40e_ethdev.c.
+ * 3. Mask of fields which need to be matched should be
+ * filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ * filled with 0.
+ */
+static int
+i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vf *vf_spec;
+ uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
+ enum i40e_filter_pctype pctype;
+ uint64_t input_set = I40E_INSET_NONE;
+ uint16_t flag_offset;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ uint32_t j;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if (eth_spec || eth_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH spec/mask");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (!ipv4_spec || !ipv4_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv4 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ input_set |= I40E_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TOS;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ input_set |= I40E_INSET_IPV4_PROTO;
+
+ /* Get filter info */
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
+ /* Check if it is a fragment. */
+ flag_offset =
+ rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
+ if (flag_offset & IPV4_HDR_OFFSET_MASK ||
+ flag_offset & IPV4_HDR_MF_FLAG)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV4;
+
+ /* Get the filter info */
+ filter->input.flow.ip4_flow.proto =
+ ipv4_spec->hdr.next_proto_id;
+ filter->input.flow.ip4_flow.tos =
+ ipv4_spec->hdr.type_of_service;
+ filter->input.flow.ip4_flow.ttl =
+ ipv4_spec->hdr.time_to_live;
+ filter->input.flow.ip4_flow.src_ip =
+ ipv4_spec->hdr.src_addr;
+ filter->input.flow.ip4_flow.dst_ip =
+ ipv4_spec->hdr.dst_addr;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec =
+ (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_mask =
+ (const struct rte_flow_item_ipv6 *)item->mask;
+ if (!ipv6_spec || !ipv6_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL IPv6 spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ /* SRC and DST address of IPv6 shouldn't be masked */
+ for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
+ if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
+ ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+ }
+
+ input_set |= I40E_INSET_IPV6_SRC;
+ input_set |= I40E_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
+ input_set |= I40E_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ input_set |= I40E_INSET_IPV6_HOP_LIMIT;
+
+ /* Get filter info */
+ filter->input.flow.ipv6_flow.tc =
+ (uint8_t)(ipv6_spec->hdr.vtc_flow <<
+ I40E_IPV4_TC_SHIFT);
+ filter->input.flow.ipv6_flow.proto =
+ ipv6_spec->hdr.proto;
+ filter->input.flow.ipv6_flow.hop_limits =
+ ipv6_spec->hdr.hop_limits;
+
+ rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
+ ipv6_spec->hdr.src_addr, 16);
+ rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
+ ipv6_spec->hdr.dst_addr, 16);
+
+ /* Check if it is a fragment. */
+ if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
+ flow_type = RTE_ETH_FLOW_FRAG_IPV6;
+ else
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!tcp_spec || !tcp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL TCP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port != UINT16_MAX ||
+ tcp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.tcp4_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp4_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.tcp6_flow.src_port =
+ tcp_spec->hdr.src_port;
+ filter->input.flow.tcp6_flow.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (!udp_spec || !udp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL UDP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port != UINT16_MAX ||
+ udp_mask->hdr.dst_port != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type =
+ RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.udp4_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp4_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.udp6_flow.src_port =
+ udp_spec->hdr.src_port;
+ filter->input.flow.udp6_flow.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (!sctp_spec || !sctp_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "NULL SCTP spec/mask");
+ return -rte_errno;
+ }
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port != UINT16_MAX ||
+ sctp_mask->hdr.dst_port != UINT16_MAX ||
+ sctp_mask->hdr.tag != UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+ input_set |= I40E_INSET_SRC_PORT;
+ input_set |= I40E_INSET_DST_PORT;
+ input_set |= I40E_INSET_SCTP_VT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ filter->input.flow.sctp4_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp4_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp4_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ filter->input.flow.sctp6_flow.src_port =
+ sctp_spec->hdr.src_port;
+ filter->input.flow.sctp6_flow.dst_port =
+ sctp_spec->hdr.dst_port;
+ filter->input.flow.sctp6_flow.verify_tag =
+ sctp_spec->hdr.tag;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VF:
+ vf_spec = (const struct rte_flow_item_vf *)item->spec;
+ filter->input.flow_ext.is_vf = 1;
+ filter->input.flow_ext.dst_id = vf_spec->id;
+ if (filter->input.flow_ext.is_vf &&
+ filter->input.flow_ext.dst_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VF ID for FDIR.");
+ return -rte_errno;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ pctype = i40e_flowtype_to_pctype(flow_type);
+ if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported flow type");
+ return -rte_errno;
+ }
+
+ if (input_set != i40e_get_default_input_set(pctype)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Invalid input set.");
+ return -rte_errno;
+ }
+ filter->input.flow_type = flow_type;
+
+ return 0;
+}
+
+/* Parse to get the action info of a FDIR filter.
+ * FDIR action supports QUEUE or (QUEUE + MARK).
+ */
+static int
+i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct rte_eth_fdir_filter *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->action.flex_off = 0;
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
+ filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
+ else
+ filter->action.behavior = RTE_ETH_FDIR_REJECT;
+
+ filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
+ filter->action.rx_queue = act_q->index;
+
+ if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue ID for FDIR.");
+ return -rte_errno;
+ }
+
+ /* Check if the next non-void item is MARK or END. */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+ act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark_spec = (const struct rte_flow_action_mark *)act->conf;
+ filter->soft_id = mark_spec->id;
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ return 0;
+}
+
+static int
+i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct rte_eth_fdir_filter *fdir_filter =
+ &filter->fdir_filter;
+ int ret;
+
+ ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_FDIR;
+
+ if (dev->data->dev_conf.fdir_conf.mode !=
+ RTE_FDIR_MODE_PERFECT) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "Check the mode in fdir_conf.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
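For illustration only, not part of the patch: a minimal sketch of how an application might build an rte_flow rule that this FDIR parser accepts, assuming an IPv4/UDP flow type whose default input set is the source/destination addresses and ports; the port, queue and mark values are hypothetical.

#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *
example_fdir_rule(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
			.dst_addr = rte_cpu_to_be_32(0xC0A80002), /* 192.168.0.2 */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(1024),
			.dst_port = rte_cpu_to_be_16(4096),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* QUEUE + MARK + END matches the action layout parsed above. */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}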
+/* Parse the action info of a tunnel filter.
+ * Tunnel actions only support PF, VF and QUEUE.
+ */
+static int
+i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *actions,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *act_vf;
+ uint32_t index = 0;
+
+ /* Check if the first non-void action is PF or VF. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
+ act->type != RTE_FLOW_ACTION_TYPE_VF) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
+ act_vf = (const struct rte_flow_action_vf *)act->conf;
+ filter->vf_id = act_vf->id;
+ filter->is_to_vf = 1;
+ if (filter->vf_id >= pf->vf_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid VF ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is QUEUE */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue_id = act_q->index;
+ if ((!filter->is_to_vf) &&
+ (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ } else if (filter->is_to_vf &&
+ (filter->queue_id >= pf->vf_nb_qps)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Invalid queue ID for tunnel filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+i40e_check_tenant_id_mask(const uint8_t *mask)
+{
+ uint32_t j;
+ int is_masked = 0;
+
+ for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
+ if (*(mask + j) == UINT8_MAX) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 0;
+ } else if (*(mask + j) == 0) {
+ if (j > 0 && (*(mask + j) != *(mask + j - 1)))
+ return -EINVAL;
+ is_masked = 1;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return is_masked;
+}
+
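Illustration only: the helper above only accepts a VNI mask whose bytes all agree. Assuming I40E_TENANT_ARRAY_NUM is 3, the expected results are sketched below.

/* All-ones mask: the VNI is matched, the helper returns 0. */
static const uint8_t vni_match[3] = { 0xFF, 0xFF, 0xFF };
/* All-zeros mask: the VNI is ignored, the helper returns 1. */
static const uint8_t vni_ignore[3] = { 0x00, 0x00, 0x00 };
/* Mixed or partial masks are rejected with -EINVAL. */
static const uint8_t vni_partial[3] = { 0xFF, 0xFF, 0x00 };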
+/* 1. The 'last' member of each item must be NULL, as ranges are not supported.
+ * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
+ * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
+ * 3. The mask of a field that must be matched should be
+ * filled with 1s.
+ * 4. The mask of a field that need not be matched should be
+ * filled with 0s.
+ */
+static int
+i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_eth *o_eth_spec = NULL;
+ const struct rte_flow_item_eth *o_eth_mask = NULL;
+ const struct rte_flow_item_vxlan *vxlan_spec = NULL;
+ const struct rte_flow_item_vxlan *vxlan_mask = NULL;
+ const struct rte_flow_item_eth *i_eth_spec = NULL;
+ const struct rte_flow_item_eth *i_eth_mask = NULL;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ int is_vni_masked = 0;
+ enum rte_flow_item_type item_type;
+ bool vxlan_flag = 0;
+ uint32_t tenant_id_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ if ((!eth_spec && eth_mask) ||
+ (eth_spec && !eth_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (eth_spec && eth_mask) {
+ /* The destination MAC address must be fully specified
+ * (mask of all ones); the source MAC address and the
+ * EtherType must not be matched (mask of zero).
+ */
+ if (!is_broadcast_ether_addr(&eth_mask->dst) ||
+ !is_zero_ether_addr(&eth_mask->src) ||
+ eth_mask->type) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ether spec/mask");
+ return -rte_errno;
+ }
+
+ if (!vxlan_flag)
+ rte_memcpy(&filter->outer_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ else
+ rte_memcpy(&filter->inner_mac,
+ &eth_spec->dst,
+ ETHER_ADDR_LEN);
+ }
+
+ if (!vxlan_flag) {
+ o_eth_spec = eth_spec;
+ o_eth_mask = eth_mask;
+ } else {
+ i_eth_spec = eth_spec;
+ i_eth_mask = eth_mask;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+ if (vxlan_flag) {
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ } else {
+ /* A VLAN item before VXLAN (outer VLAN) is not supported. */
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan_spec =
+ (const struct rte_flow_item_vxlan *)item->spec;
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ /* Check if the VXLAN item is used to describe the protocol.
+ * If yes, both spec and mask should be NULL.
+ * If no, both spec and mask must be provided.
+ */
+ if ((!vxlan_spec && vxlan_mask) ||
+ (vxlan_spec && !vxlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VXLAN item");
+ return -rte_errno;
+ }
+
+ /* Check if VNI is masked. */
+ if (vxlan_mask) {
+ is_vni_masked =
+ i40e_check_tenant_id_mask(vxlan_mask->vni);
+ if (is_vni_masked < 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid VNI mask");
+ return -rte_errno;
+ }
+ }
+ vxlan_flag = 1;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Check specification and mask to get the filter type */
+ if (vlan_spec && vlan_mask &&
+ (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ /* If there's inner vlan */
+ filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
+ & I40E_TCI_MASK;
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_IVLAN;
+ else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else if ((!vlan_spec && !vlan_mask) ||
+ (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
+ /* If there's no inner vlan */
+ if (vxlan_spec && vxlan_mask && !is_vni_masked) {
+ /* If there's vxlan */
+ rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+ vxlan_spec->vni, 3);
+ filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_IMAC_TENID;
+ else if (o_eth_spec && o_eth_mask &&
+ i_eth_spec && i_eth_mask)
+ filter->filter_type =
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
+ } else if (!vxlan_spec && !vxlan_mask) {
+ /* If there's no vxlan */
+ if (!o_eth_spec && !o_eth_mask &&
+ i_eth_spec && i_eth_mask) {
+ filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Not supported by tunnel filter.");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
+
+ return 0;
+}
+
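For illustration only, not part of the patch: a hypothetical pattern that this parser classifies as IMAC_TENID (no outer MAC, a fully masked VNI, an inner destination MAC). The MAC address and VNI values are made up.

#include <rte_flow.h>

static struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x12, 0x34, 0x56 } };
static struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
static struct rte_flow_item_eth inner_eth_spec = {
	.dst = { .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
};
static struct rte_flow_item_eth inner_eth_mask = {
	/* src and type are left zero: not matched */
	.dst = { .addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
};
static struct rte_flow_item vxlan_imac_tenid_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },                  /* outer MAC: not matched */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },                 /* protocol only */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },                  /* protocol only */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
	  .spec = &vxlan_spec, .mask = &vxlan_mask },         /* VNI 0x123456 */
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &inner_eth_spec, .mask = &inner_eth_mask }, /* inner dst MAC */
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};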
+static int
+i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. The 'last' member of each item must be NULL, as ranges are not supported.
+ * 2. Supported filter types: MPLS label.
+ * 3. The mask of a field that must be matched should be
+ * filled with 1s.
+ * 4. The mask of a field that need not be matched should be
+ * filled with 0s.
+ */
+static int
+i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_mpls *mpls_spec;
+ const struct rte_flow_item_mpls *mpls_mask;
+ enum rte_flow_item_type item_type;
+ bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
+ const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
+ uint32_t label_be = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+ /* IPv4 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+ /* IPv6 is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ /* UDP is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP item");
+ return -rte_errno;
+ }
+ is_mplsoudp = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ /* GRE is used to describe protocol,
+ * spec and mask should be NULL.
+ */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GRE item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ mpls_spec =
+ (const struct rte_flow_item_mpls *)item->spec;
+ mpls_mask =
+ (const struct rte_flow_item_mpls *)item->mask;
+
+ if (!mpls_spec || !mpls_mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS item");
+ return -rte_errno;
+ }
+
+ if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid MPLS label mask");
+ return -rte_errno;
+ }
+ rte_memcpy(((uint8_t *)&label_be + 1),
+ mpls_spec->label_tc_s, 3);
+ filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (is_mplsoudp)
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
+ else
+ filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
+
+ return 0;
+}
+
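Illustration only: how the label extraction above turns the 3-byte label_tc_s field into a 20-bit tenant ID, using a made-up label value.

#include <rte_memcpy.h>
#include <rte_byteorder.h>

/* MPLS label 0xABCDE occupies the top 20 bits of label_tc_s;
 * the low nibble of the third byte (TC + S bits) is removed by
 * label_mask = {0xFF, 0xFF, 0xF0} and the final shift.
 */
static uint32_t
example_mpls_label_to_tenant_id(void)
{
	const uint8_t label_tc_s[3] = { 0xAB, 0xCD, 0xE0 };
	uint32_t label_be = 0;

	rte_memcpy((uint8_t *)&label_be + 1, label_tc_s, 3);
	/* label_be now holds 0x00 0xAB 0xCD 0xE0 in network byte order */
	return rte_be_to_cpu_32(label_be) >> 4; /* == 0xABCDE */
}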
+static int
+i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_mpls_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+/* 1. The 'last' member of each item must be NULL, as ranges are not supported.
+ * 2. Supported filter types: QINQ.
+ * 3. The mask of a field that must be matched should be
+ * filled with 1s.
+ * 4. The mask of a field that need not be matched should be
+ * filled with 0s.
+ */
+static int
+i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
+ const struct rte_flow_item *pattern,
+ struct rte_flow_error *error,
+ struct i40e_tunnel_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ const struct rte_flow_item_vlan *vlan_spec = NULL;
+ const struct rte_flow_item_vlan *vlan_mask = NULL;
+ const struct rte_flow_item_vlan *i_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *i_vlan_mask = NULL;
+ const struct rte_flow_item_vlan *o_vlan_spec = NULL;
+ const struct rte_flow_item_vlan *o_vlan_mask = NULL;
+
+ enum rte_flow_item_type item_type;
+ bool vlan_flag = 0;
+
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Not support range");
+ return -rte_errno;
+ }
+ item_type = item->type;
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid ETH item");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan_spec =
+ (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask =
+ (const struct rte_flow_item_vlan *)item->mask;
+
+ if (!(vlan_spec && vlan_mask)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vlan item");
+ return -rte_errno;
+ }
+
+ if (!vlan_flag) {
+ o_vlan_spec = vlan_spec;
+ o_vlan_mask = vlan_mask;
+ vlan_flag = 1;
+ } else {
+ i_vlan_spec = vlan_spec;
+ i_vlan_mask = vlan_mask;
+ vlan_flag = 0;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /* Get filter specification */
+ if (o_vlan_spec && o_vlan_mask && i_vlan_spec && i_vlan_mask &&
+ (o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
+ (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
+ filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
+ & I40E_TCI_MASK;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "Invalid filter type");
+ return -rte_errno;
+ }
+
+ filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
+ return 0;
+}
+
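For illustration only, not part of the patch: a hypothetical eth / vlan / vlan pattern this QinQ parser accepts, with both TCIs fully masked. The literal 0xFFFF stands in for I40E_TCI_MASK; real code should use the driver macro, and the VLAN IDs are examples.

#include <string.h>
#include <rte_flow.h>
#include <rte_byteorder.h>

static void
example_qinq_pattern(struct rte_flow_item pattern[4],
		     struct rte_flow_item_vlan vlan[4])
{
	/* vlan[0]/vlan[1]: outer spec/mask, vlan[2]/vlan[3]: inner spec/mask */
	memset(vlan, 0, 4 * sizeof(*vlan));
	vlan[0].tci = rte_cpu_to_be_16(100);    /* outer VLAN ID 100 */
	vlan[1].tci = rte_cpu_to_be_16(0xFFFF); /* TCI fully matched */
	vlan[2].tci = rte_cpu_to_be_16(200);    /* inner VLAN ID 200 */
	vlan[3].tci = rte_cpu_to_be_16(0xFFFF);

	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN,
					     .spec = &vlan[0], .mask = &vlan[1] };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VLAN,
					     .spec = &vlan[2], .mask = &vlan[3] };
	pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}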
+static int
+i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ union i40e_filter_t *filter)
+{
+ struct i40e_tunnel_filter_conf *tunnel_filter =
+ &filter->consistent_tunnel_filter;
+ int ret;
+
+ ret = i40e_flow_parse_qinq_pattern(dev, pattern,
+ error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+ if (ret)
+ return ret;
+
+ ret = i40e_flow_parse_attr(attr, error);
+ if (ret)
+ return ret;
+
+ cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+ return ret;
+}
+
+static int
+i40e_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item *items; /* internal pattern w/o VOID items */
+ parse_filter_t parse_filter;
+ uint32_t item_num = 0; /* non-void item number of pattern*/
+ uint32_t i = 0;
+ int ret;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ memset(&cons_filter, 0, sizeof(cons_filter));
+
+ /* Get the non-void item number of pattern */
+ while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
+ if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
+ item_num++;
+ i++;
+ }
+ item_num++;
+
+ items = rte_zmalloc("i40e_pattern",
+ item_num * sizeof(struct rte_flow_item), 0);
+ if (!items) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "No memory for PMD internal items.");
+ return -ENOMEM;
+ }
+
+ i40e_pattern_skip_void_item(items, pattern);
+
+ /* Find if there's matched parse filter function */
+ parse_filter = i40e_find_parse_filter_func(items);
+ if (!parse_filter) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern, "Unsupported pattern");
+ rte_free(items);
+ return -rte_errno;
+ }
+
+ ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
+
+ rte_free(items);
+
+ return ret;
+}
+
+static struct rte_flow *
+i40e_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ struct rte_flow *flow;
+ int ret;
+
+ flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to allocate memory");
+ return flow;
+ }
+
+ ret = i40e_flow_validate(dev, attr, pattern, actions, error);
+ if (ret < 0) {
+ rte_free(flow);
+ return NULL;
+ }
+
+ switch (cons_filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_ethertype_filter_set(pf,
+ &cons_filter.ethertype_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
+ i40e_ethertype_filter_list);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &cons_filter.fdir_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
+ i40e_fdir_filter_list);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_dev_consistent_tunnel_filter_set(pf,
+ &cons_filter.consistent_tunnel_filter, 1);
+ if (ret)
+ goto free_flow;
+ flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
+ i40e_tunnel_filter_list);
+ break;
+ default:
+ goto free_flow;
+ }
+
+ flow->filter_type = cons_filter_type;
+ TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
+ return flow;
+
+free_flow:
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(flow);
+ return NULL;
+}
+
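For illustration only, not part of the patch: the application-side calling sequence that exercises these driver entry points, with hypothetical port and rule variables built as in the earlier sketches.

#include <rte_flow.h>

static int
example_install_rule(uint8_t port_id,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* Optional dry run; i40e_flow_create() validates again anyway. */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) < 0)
		return -1;

	flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	if (!flow)
		return -1;

	/* ... later: rte_flow_destroy(port_id, flow, &err); */
	return 0;
}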
+static int
+i40e_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ enum rte_filter_type filter_type = flow->filter_type;
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = i40e_flow_destroy_ethertype_filter(pf,
+ (struct i40e_ethertype_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ ret = i40e_flow_destroy_tunnel_filter(pf,
+ (struct i40e_tunnel_filter *)flow->rule);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = i40e_add_del_fdir_filter(dev,
+ &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ } else
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to destroy flow.");
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
+ struct i40e_ethertype_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
+ struct i40e_ethertype_filter *node;
+ struct i40e_control_filter_stats stats;
+ uint16_t flags = 0;
+ int ret = 0;
+
+ if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
+ flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
+
+ memset(&stats, 0, sizeof(stats));
+ ret = i40e_aq_add_rem_control_packet_filter(hw,
+ filter->input.mac_addr.addr_bytes,
+ filter->input.ether_type,
+ flags, pf->main_vsi->seid,
+ filter->queue, 0, &stats, NULL);
+ if (ret < 0)
+ return ret;
+
+ node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_ethertype_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
+ struct i40e_tunnel_filter *filter)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi *vsi;
+ struct i40e_pf_vf *vf;
+ struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
+ struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
+ struct i40e_tunnel_filter *node;
+ bool big_buffer = 0;
+ int ret = 0;
+
+ memset(&cld_filter, 0, sizeof(cld_filter));
+ ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
+ (struct ether_addr *)&cld_filter.element.outer_mac);
+ ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
+ (struct ether_addr *)&cld_filter.element.inner_mac);
+ cld_filter.element.inner_vlan = filter->input.inner_vlan;
+ cld_filter.element.flags = filter->input.flags;
+ cld_filter.element.tenant_id = filter->input.tenant_id;
+ cld_filter.element.queue_number = filter->queue;
+ rte_memcpy(cld_filter.general_fields,
+ filter->input.general_fields,
+ sizeof(cld_filter.general_fields));
+
+ if (!filter->is_to_vf)
+ vsi = pf->main_vsi;
+ else {
+ vf = &pf->vfs[filter->vf_id];
+ vsi = vf->vsi;
+ }
+
+ if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
+ I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
+ ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
+ I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
+ big_buffer = 1;
+
+ if (big_buffer)
+ ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
+ &cld_filter, 1);
+ else
+ ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
+ &cld_filter.element, 1);
+ if (ret < 0)
+ return -ENOTSUP;
+
+ node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
+ if (!node)
+ return -EINVAL;
+
+ ret = i40e_sw_tunnel_filter_del(pf, &node->input);
+
+ return ret;
+}
+
+static int
+i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ int ret;
+
+ ret = i40e_flow_flush_fdir_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush FDIR flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_ethertype_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to ethertype flush flows.");
+ return -rte_errno;
+ }
+
+ ret = i40e_flow_flush_tunnel_filter(pf);
+ if (ret) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to flush tunnel flows.");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+static int
+i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
+{
+ struct rte_eth_dev *dev = pf->adapter->eth_dev;
+ struct i40e_fdir_info *fdir_info = &pf->fdir;
+ struct i40e_fdir_filter *fdir_filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret;
+
+ ret = i40e_fdir_flush(dev);
+ if (!ret) {
+ /* Delete FDIR filters in FDIR list. */
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ ret = i40e_sw_fdir_filter_del(pf,
+ &fdir_filter->fdir.input);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Delete FDIR flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all ethertype filters */
+static int
+i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
+{
+ struct i40e_ethertype_filter_list
+ *ethertype_list = &pf->ethertype.ethertype_list;
+ struct i40e_ethertype_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(ethertype_list))) {
+ ret = i40e_flow_destroy_ethertype_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete ethertype flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
+
+/* Flush all tunnel filters */
+static int
+i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
+{
+ struct i40e_tunnel_filter_list
+ *tunnel_list = &pf->tunnel.tunnel_list;
+ struct i40e_tunnel_filter *filter;
+ struct rte_flow *flow;
+ void *temp;
+ int ret = 0;
+
+ while ((filter = TAILQ_FIRST(tunnel_list))) {
+ ret = i40e_flow_destroy_tunnel_filter(pf, filter);
+ if (ret)
+ return ret;
+ }
+
+ /* Delete tunnel flows in flow list. */
+ TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
+ if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
+ TAILQ_REMOVE(&pf->flow_list, flow, node);
+ rte_free(flow);
+ }
+ }
+
+ return ret;
+}
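A short application-side sketch of triggering the flush paths above; the error reporting is illustrative only.

#include <stdio.h>
#include <rte_flow.h>

/* Remove every rte_flow rule previously created on the port. */
static void
example_flush_all_rules(uint8_t port_id)
{
	struct rte_flow_error err;

	if (rte_flow_flush(port_id, &err) < 0)
		printf("flush failed: %s\n",
		       err.message ? err.message : "(no message)");
}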
diff --git a/drivers/net/i40e/i40e_logs.h b/drivers/net/i40e/i40e_logs.h
index e042e242..8e99cd52 100644
--- a/drivers/net/i40e/i40e_logs.h
+++ b/drivers/net/i40e/i40e_logs.h
@@ -34,14 +34,11 @@
#ifndef _I40E_LOGS_H_
#define _I40E_LOGS_H_
+extern int i40e_logtype_init;
#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_I40E_DEBUG_INIT
+ rte_log(RTE_LOG_ ## level, i40e_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while(0)
-#endif
#ifdef RTE_LIBRTE_I40E_DEBUG_RX
#define PMD_RX_LOG(level, fmt, args...) \
@@ -64,12 +61,10 @@
#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
#endif
-#ifdef RTE_LIBRTE_I40E_DEBUG_DRIVER
+extern int i40e_logtype_driver;
#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
+ rte_log(RTE_LOG_ ## level, i40e_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
#define PMD_DRV_LOG(level, fmt, args...) \
PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c
index 97b8eccc..0758503e 100644
--- a/drivers/net/i40e/i40e_pf.c
+++ b/drivers/net/i40e/i40e_pf.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -55,6 +55,7 @@
#include "i40e_ethdev.h"
#include "i40e_rxtx.h"
#include "i40e_pf.h"
+#include "rte_pmd_i40e.h"
#define I40E_CFG_CRCSTRIP_DEFAULT 1
@@ -274,14 +275,30 @@ i40e_pf_host_send_msg_to_vf(struct i40e_pf_vf *vf,
}
static void
-i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_version(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_version_info info;
- info.major = I40E_DPDK_VERSION_MAJOR;
- info.minor = I40E_DPDK_VERSION_MINOR;
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
- I40E_SUCCESS, (uint8_t *)&info, sizeof(info));
+ /* Respond like a Linux PF host in order to support both DPDK VF and
+ * Linux VF drivers. The cost is that DPDK-host-specific features such
+ * as CFG_VLAN_PVID and CONFIG_VSI_QUEUES_EXT will not be available.
+ *
+ * A DPDK VF also cannot identify the host driver by the returned
+ * version number; it always assumes it is talking to a Linux PF.
+ */
+ info.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ info.minor = I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_SUCCESS,
+ (uint8_t *)&info,
+ sizeof(info));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&info,
+ sizeof(info));
}
static int
@@ -294,13 +311,20 @@ i40e_pf_host_process_cmd_reset_vf(struct i40e_pf_vf *vf)
}
static int
-i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf, bool b_op)
{
struct i40e_virtchnl_vf_resource *vf_res = NULL;
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
uint32_t len = 0;
int ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
/* only have 1 VSI by default */
len = sizeof(struct i40e_virtchnl_vf_resource) +
I40E_DEFAULT_VF_VSI_NUM *
@@ -323,8 +347,7 @@ i40e_pf_host_process_cmd_get_vf_resource(struct i40e_pf_vf *vf)
/* Change below setting if PF host can support more VSIs for VF */
vf_res->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- /* As assume Vf only has single VSI now, always return 0 */
- vf_res->vsi_res[0].vsi_id = 0;
+ vf_res->vsi_res[0].vsi_id = vf->vsi->vsi_id;
vf_res->vsi_res[0].num_queue_pairs = vf->vsi->nb_qps;
ether_addr_copy(&vf->mac_addr,
(struct ether_addr *)vf_res->vsi_res[0].default_mac_addr);
@@ -382,6 +405,29 @@ i40e_pf_host_hmc_config_rxq(struct i40e_hw *hw,
return err;
}
+static inline uint8_t
+i40e_vsi_get_tc_of_queue(struct i40e_vsi *vsi,
+ uint16_t queue_id)
+{
+ struct i40e_aqc_vsi_properties_data *info = &vsi->info;
+ uint16_t bsf, qp_idx;
+ uint8_t i;
+
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & (1 << i)) {
+ qp_idx = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >>
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT);
+ bsf = rte_le_to_cpu_16((info->tc_mapping[i] &
+ I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >>
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
+ if (queue_id >= qp_idx && queue_id < qp_idx + (1 << bsf))
+ return i;
+ }
+ }
+ return 0;
+}
+
static int
i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
struct i40e_pf_vf *vf,
@@ -389,16 +435,20 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
{
int err = I40E_SUCCESS;
struct i40e_hmc_obj_txq tx_ctx;
+ struct i40e_vsi *vsi = vf->vsi;
uint32_t qtx_ctl;
- uint16_t abs_queue_id = vf->vsi->base_queue + txq->queue_id;
-
+ uint16_t abs_queue_id = vsi->base_queue + txq->queue_id;
+ uint8_t dcb_tc;
/* clear the context structure first */
memset(&tx_ctx, 0, sizeof(tx_ctx));
- tx_ctx.new_context = 1;
tx_ctx.base = txq->dma_ring_addr / I40E_QUEUE_BASE_ADDR_UNIT;
tx_ctx.qlen = txq->ring_len;
- tx_ctx.rdylist = rte_le_to_cpu_16(vf->vsi->info.qs_handle[0]);
+ dcb_tc = i40e_vsi_get_tc_of_queue(vsi, txq->queue_id);
+ tx_ctx.rdylist = rte_le_to_cpu_16(vsi->info.qs_handle[dcb_tc]);
+ tx_ctx.head_wb_ena = txq->headwb_enabled;
+ tx_ctx.head_wb_addr = txq->dma_headwb_addr;
+
err = i40e_clear_lan_tx_queue_context(hw, abs_queue_id);
if (err != I40E_SUCCESS)
return err;
@@ -425,7 +475,8 @@ i40e_pf_host_hmc_config_txq(struct i40e_hw *hw,
static int
i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -434,11 +485,18 @@ i40e_pf_host_process_cmd_config_vsi_queues(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_info *vc_qpi;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqci->num_queue_pairs > vsi->nb_qps ||
vc_vqci->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci,
vc_vqci->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -484,7 +542,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_vsi *vsi = vf->vsi;
@@ -493,11 +552,19 @@ i40e_pf_host_process_cmd_config_vsi_queues_ext(struct i40e_pf_vf *vf,
struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
int i, ret = I40E_SUCCESS;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (!msg || vc_vqcei->num_queue_pairs > vsi->nb_qps ||
vc_vqcei->num_queue_pairs > I40E_MAX_VSI_QP ||
msglen < I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei,
vc_vqcei->num_queue_pairs)) {
- PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong\n");
+ PMD_DRV_LOG(ERR, "vsi_queue_config_ext_info argument wrong");
ret = I40E_ERR_PARAM;
goto send_msg;
}
@@ -539,13 +606,125 @@ send_msg:
return ret;
}
+static void
+i40e_pf_config_irq_link_list(struct i40e_pf_vf *vf,
+ struct i40e_virtchnl_vector_map *vvm)
+{
+#define BITS_PER_CHAR 8
+ uint64_t linklistmap = 0, tempmap;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
+ uint16_t qid;
+ bool b_first_q = true;
+ enum i40e_queue_type qtype;
+ uint16_t vector_id;
+ uint32_t reg, reg_idx;
+ uint16_t itr_idx = 0, i;
+
+ vector_id = vvm->vector_id;
+ /* setup the head */
+ if (!vector_id)
+ reg_idx = I40E_VPINT_LNKLST0(vf->vf_idx);
+ else
+ reg_idx = I40E_VPINT_LNKLSTN(
+ ((hw->func_caps.num_msix_vectors_vf - 1) * vf->vf_idx)
+ + (vector_id - 1));
+
+ if (vvm->rxq_map == 0 && vvm->txq_map == 0) {
+ I40E_WRITE_REG(hw, reg_idx,
+ I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK);
+ goto cfg_irq_done;
+ }
+
+ /* sort all rx and tx queues */
+ tempmap = vvm->rxq_map;
+ for (i = 0; i < sizeof(vvm->rxq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1ULL << (2 * i));
+ tempmap >>= 1;
+ }
+
+ tempmap = vvm->txq_map;
+ for (i = 0; i < sizeof(vvm->txq_map) * BITS_PER_CHAR; i++) {
+ if (tempmap & 0x1)
+ linklistmap |= (1ULL << (2 * i + 1));
+ tempmap >>= 1;
+ }
+
+ /* Link all rx and tx queues into a chained list */
+ tempmap = linklistmap;
+ i = 0;
+ b_first_q = true;
+ do {
+ if (tempmap & 0x1) {
+ qtype = (enum i40e_queue_type)(i % 2);
+ qid = vf->vsi->base_queue + i / 2;
+ if (b_first_q) {
+ /* This is header */
+ b_first_q = false;
+ reg = ((qtype <<
+ I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+ | qid);
+ } else {
+ /* element in the link list */
+ reg = (vector_id) |
+ (qtype << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (qid << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ }
+ I40E_WRITE_REG(hw, reg_idx, reg);
+ /* find next register to program */
+ switch (qtype) {
+ case I40E_QUEUE_TYPE_RX:
+ reg_idx = I40E_QINT_RQCTL(qid);
+ itr_idx = vvm->rxitr_idx;
+ break;
+ case I40E_QUEUE_TYPE_TX:
+ reg_idx = I40E_QINT_TQCTL(qid);
+ itr_idx = vvm->txitr_idx;
+ break;
+ default:
+ break;
+ }
+ }
+ i++;
+ tempmap >>= 1;
+ } while (tempmap);
+
+ /* Terminate the link list */
+ reg = (vector_id) |
+ (0 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ BIT(I40E_QINT_RQCTL_CAUSE_ENA_SHIFT) |
+ (itr_idx << I40E_QINT_RQCTL_ITR_INDX_SHIFT);
+ I40E_WRITE_REG(hw, reg_idx, reg);
+
+cfg_irq_done:
+ I40E_WRITE_FLUSH(hw);
+}
+
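Illustration only: the interleaving above places RX queue i at bit 2*i and TX queue i at bit 2*i+1, so queues are chained in RX0, TX0, RX1, TX1, ... order. A standalone sketch with made-up queue maps:

#include <stdint.h>

static uint64_t
example_interleave_queue_maps(uint16_t rxq_map, uint16_t txq_map)
{
	uint64_t linklistmap = 0;
	unsigned int i;

	for (i = 0; i < 16; i++) {
		if (rxq_map & (1u << i))
			linklistmap |= 1ULL << (2 * i);
		if (txq_map & (1u << i))
			linklistmap |= 1ULL << (2 * i + 1);
	}
	/* rxq_map = 0x3, txq_map = 0x1  ->  linklistmap = 0x7:
	 * bit 0 = RX0, bit 1 = TX0, bit 2 = RX1.
	 */
	return linklistmap;
}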
static int
i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
struct i40e_virtchnl_irq_map_info *irqmap =
(struct i40e_virtchnl_irq_map_info *)msg;
+ struct i40e_virtchnl_vector_map *map;
+ int i;
+ uint16_t vector_id;
+ unsigned long qbit_max;
+
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
if (msg == NULL || msglen < sizeof(struct i40e_virtchnl_irq_map_info)) {
PMD_DRV_LOG(ERR, "buffer too short");
@@ -553,23 +732,46 @@ i40e_pf_host_process_cmd_config_irq_map(struct i40e_pf_vf *vf,
goto send_msg;
}
- /* Assume VF only have 1 vector to bind all queues */
- if (irqmap->num_vectors != 1) {
- PMD_DRV_LOG(ERR, "DKDK host only support 1 vector");
- ret = I40E_ERR_PARAM;
+ /* The PF host supports both DPDK VF and Linux VF drivers; they are
+ * identified by the number of vectors requested.
+ */
+
+ /* A DPDK VF requests only a single vector. */
+ if (irqmap->num_vectors == 1) {
+ /* This MSIX intr store the intr in VF range */
+ vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
+ vf->vsi->nb_msix = irqmap->num_vectors;
+ vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+
+ /* The exact TX/RX queue-to-vector mapping does not matter here;
+ * simply link all VF RX queues together. Only the mapping is done;
+ * the VF can enable/disable the interrupt by itself.
+ */
+ i40e_vsi_queues_bind_intr(vf->vsi);
goto send_msg;
}
- /* This MSIX intr store the intr in VF range */
- vf->vsi->msix_intr = irqmap->vecmap[0].vector_id;
- vf->vsi->nb_msix = irqmap->num_vectors;
- vf->vsi->nb_used_qps = vf->vsi->nb_qps;
+ /* Otherwise, this is the Linux VF driver. */
+ qbit_max = 1 << pf->vf_nb_qp_max;
+ for (i = 0; i < irqmap->num_vectors; i++) {
+ map = &irqmap->vecmap[i];
+
+ vector_id = map->vector_id;
+ /* validate msg params */
+ if (vector_id >= hw->func_caps.num_msix_vectors_vf) {
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+
+ if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
+ i40e_pf_config_irq_link_list(vf, map);
+ } else {
+ /* The configured queue size exceeds the limit. */
+ ret = I40E_ERR_PARAM;
+ goto send_msg;
+ }
+ }
- /* Don't care how the TX/RX queue mapping with this vector.
- * Link all VF RX queues together. Only did mapping work.
- * VF can disable/enable the intr by itself.
- */
- i40e_vsi_queues_bind_intr(vf->vsi);
send_msg:
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
ret, NULL, 0);
@@ -648,12 +850,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_disable_queues(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_queue_select *q_sel =
(struct i40e_virtchnl_queue_select *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*q_sel)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -671,7 +882,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -680,6 +892,14 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
memset(&filter, 0 , sizeof(struct i40e_mac_filter_info));
if (msg == NULL || msglen <= sizeof(*addr_list)) {
@@ -692,8 +912,8 @@ i40e_pf_host_process_cmd_add_ether_address(struct i40e_pf_vf *vf,
mac = (struct ether_addr *)(addr_list->list[i].addr);
(void)rte_memcpy(&filter.mac_addr, mac, ETHER_ADDR_LEN);
filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
- if(!is_valid_assigned_ether_addr(mac) ||
- i40e_vsi_add_mac(vf->vsi, &filter)) {
+ if (is_zero_ether_addr(mac) ||
+ i40e_vsi_add_mac(vf->vsi, &filter)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
}
@@ -709,7 +929,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_ether_addr_list *addr_list =
@@ -717,6 +938,14 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
int i;
struct ether_addr *mac;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*addr_list)) {
PMD_DRV_LOG(ERR, "delete_ether_address argument too short");
ret = I40E_ERR_PARAM;
@@ -725,7 +954,7 @@ i40e_pf_host_process_cmd_del_ether_address(struct i40e_pf_vf *vf,
for (i = 0; i < addr_list->num_elements; i++) {
mac = (struct ether_addr *)(addr_list->list[i].addr);
- if(!is_valid_assigned_ether_addr(mac) ||
+ if(is_zero_ether_addr(mac) ||
i40e_vsi_delete_mac(vf->vsi, mac)) {
ret = I40E_ERR_INVALID_MAC_ADDR;
goto send_msg;
@@ -741,7 +970,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
- uint8_t *msg, uint16_t msglen)
+ uint8_t *msg, uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -749,6 +979,14 @@ i40e_pf_host_process_cmd_add_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "add_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -773,7 +1011,8 @@ send_msg:
static int
i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_filter_list *vlan_filter_list =
@@ -781,6 +1020,14 @@ i40e_pf_host_process_cmd_del_vlan(struct i40e_pf_vf *vf,
int i;
uint16_t *vid;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen <= sizeof(*vlan_filter_list)) {
PMD_DRV_LOG(ERR, "delete_vlan argument too short");
ret = I40E_ERR_PARAM;
@@ -805,7 +1052,8 @@ static int
i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_promisc_info *promisc =
@@ -813,6 +1061,14 @@ i40e_pf_host_process_cmd_config_promisc_mode(
struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf);
bool unicast = FALSE, multicast = FALSE;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*promisc)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -838,13 +1094,20 @@ send_msg:
}
static int
-i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf)
+i40e_pf_host_process_cmd_get_stats(struct i40e_pf_vf *vf, bool b_op)
{
i40e_update_vsi_stats(vf->vsi);
- i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_SUCCESS, (uint8_t *)&vf->vsi->eth_stats,
- sizeof(vf->vsi->eth_stats));
+ if (b_op)
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
+ else
+ i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_NOT_SUPPORTED,
+ (uint8_t *)&vf->vsi->eth_stats,
+ sizeof(vf->vsi->eth_stats));
return I40E_SUCCESS;
}
@@ -853,12 +1116,21 @@ static int
i40e_pf_host_process_cmd_cfg_vlan_offload(
struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_vlan_offload_info *offload =
(struct i40e_virtchnl_vlan_offload_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*offload)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -879,12 +1151,21 @@ send_msg:
static int
i40e_pf_host_process_cmd_cfg_pvid(struct i40e_pf_vf *vf,
uint8_t *msg,
- uint16_t msglen)
+ uint16_t msglen,
+ bool b_op)
{
int ret = I40E_SUCCESS;
struct i40e_virtchnl_pvid_info *tpid_info =
(struct i40e_virtchnl_pvid_info *)msg;
+ if (!b_op) {
+ i40e_pf_host_send_msg_to_vf(
+ vf,
+ I40E_VIRTCHNL_OP_CFG_VLAN_PVID,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ return ret;
+ }
+
if (msg == NULL || msglen != sizeof(*tpid_info)) {
ret = I40E_ERR_PARAM;
goto send_msg;
@@ -899,7 +1180,7 @@ send_msg:
return ret;
}
-static void
+void
i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
{
struct i40e_virtchnl_pf_event event;
@@ -907,8 +1188,33 @@ i40e_notify_vf_link_status(struct rte_eth_dev *dev, struct i40e_pf_vf *vf)
event.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
event.event_data.link_event.link_status =
dev->data->dev_link.link_status;
- event.event_data.link_event.link_speed =
- (enum i40e_aq_link_speed)dev->data->dev_link.link_speed;
+
+ /* need to convert the ETH_SPEED_xxx into I40E_LINK_SPEED_xxx */
+ switch (dev->data->dev_link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_100MB;
+ break;
+ case ETH_SPEED_NUM_1G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_10GB;
+ break;
+ case ETH_SPEED_NUM_20G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_20GB;
+ break;
+ case ETH_SPEED_NUM_25G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_25GB;
+ break;
+ case ETH_SPEED_NUM_40G:
+ event.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
+ break;
+ default:
+ event.event_data.link_event.link_speed =
+ I40E_LINK_SPEED_UNKNOWN;
+ break;
+ }
+
i40e_pf_host_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_EVENT,
I40E_SUCCESS, (uint8_t *)&event, sizeof(event));
}
@@ -925,6 +1231,8 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
struct i40e_pf_vf *vf;
/* AdminQ will pass absolute VF id, transfer to internal vf id */
uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id;
+ struct rte_pmd_i40e_mb_event_param cb_param;
+ bool b_op = TRUE;
if (vf_id > pf->vf_num - 1 || !pf->vfs) {
PMD_DRV_LOG(ERR, "invalid argument");
@@ -939,10 +1247,35 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
return;
}
+ /**
+ * Initialise the structure sent to the user application; the
+ * application's response is returned in the retval field.
+ */
+ cb_param.retval = RTE_PMD_I40E_MB_EVENT_PROCEED;
+ cb_param.vfid = vf_id;
+ cb_param.msg_type = opcode;
+ cb_param.msg = (void *)msg;
+ cb_param.msglen = msglen;
+
+ /**
+ * Ask the user application whether we are allowed to perform this
+ * operation. If cb_param.retval is RTE_PMD_I40E_MB_EVENT_PROCEED,
+ * proceed as usual. If it is RTE_PMD_I40E_MB_EVENT_NOOP_ACK or
+ * RTE_PMD_I40E_MB_EVENT_NOOP_NACK, do nothing and reply to the VF
+ * with I40E_NOT_SUPPORTED, since the PF must always send a response
+ * and a plain ACK/NACK is not defined for these messages.
+ */
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &cb_param);
+ if (cb_param.retval != RTE_PMD_I40E_MB_EVENT_PROCEED) {
+ PMD_DRV_LOG(WARNING, "VF to PF message(%d) is not permitted!",
+ opcode);
+ b_op = FALSE;
+ }
+
switch (opcode) {
case I40E_VIRTCHNL_OP_VERSION :
PMD_DRV_LOG(INFO, "OP_VERSION received");
- i40e_pf_host_process_cmd_version(vf);
+ i40e_pf_host_process_cmd_version(vf, b_op);
break;
case I40E_VIRTCHNL_OP_RESET_VF :
PMD_DRV_LOG(INFO, "OP_RESET_VF received");
@@ -950,61 +1283,72 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
PMD_DRV_LOG(INFO, "OP_GET_VF_RESOURCES received");
- i40e_pf_host_process_cmd_get_vf_resource(vf);
+ i40e_pf_host_process_cmd_get_vf_resource(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES received");
- i40e_pf_host_process_cmd_config_vsi_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_vsi_queues(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT:
PMD_DRV_LOG(INFO, "OP_CONFIG_VSI_QUEUES_EXT received");
i40e_pf_host_process_cmd_config_vsi_queues_ext(vf, msg,
- msglen);
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
PMD_DRV_LOG(INFO, "OP_CONFIG_IRQ_MAP received");
- i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_irq_map(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_ENABLE_QUEUES received");
- i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
- i40e_notify_vf_link_status(dev, vf);
+ if (b_op) {
+ i40e_pf_host_process_cmd_enable_queues(vf, msg, msglen);
+ i40e_notify_vf_link_status(dev, vf);
+ } else {
+ i40e_pf_host_send_msg_to_vf(
+ vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_NOT_SUPPORTED, NULL, 0);
+ }
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
PMD_DRV_LOG(INFO, "OP_DISABLE_QUEUE received");
- i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen);
+ i40e_pf_host_process_cmd_disable_queues(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_ADD_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_add_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
PMD_DRV_LOG(INFO, "OP_DEL_ETHER_ADDRESS received");
- i40e_pf_host_process_cmd_del_ether_address(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_ether_address(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_ADD_VLAN:
PMD_DRV_LOG(INFO, "OP_ADD_VLAN received");
- i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_add_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_DEL_VLAN:
PMD_DRV_LOG(INFO, "OP_DEL_VLAN received");
- i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen);
+ i40e_pf_host_process_cmd_del_vlan(vf, msg, msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
PMD_DRV_LOG(INFO, "OP_CONFIG_PROMISCUOUS_MODE received");
- i40e_pf_host_process_cmd_config_promisc_mode(vf, msg, msglen);
+ i40e_pf_host_process_cmd_config_promisc_mode(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_GET_STATS:
PMD_DRV_LOG(INFO, "OP_GET_STATS received");
- i40e_pf_host_process_cmd_get_stats(vf);
+ i40e_pf_host_process_cmd_get_stats(vf, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_OFFLOAD received");
- i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_vlan_offload(vf, msg,
+ msglen, b_op);
break;
case I40E_VIRTCHNL_OP_CFG_VLAN_PVID:
PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received");
- i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen);
+ i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen, b_op);
break;
/* Don't add command supported below, which will
* return an error code.
@@ -1055,9 +1399,9 @@ i40e_pf_host_init(struct rte_eth_dev *dev)
ret = i40e_pf_host_vf_reset(&pf->vfs[i], 0);
if (ret != I40E_SUCCESS)
goto fail;
- eth_random_addr(pf->vfs[i].mac_addr.addr_bytes);
}
+ RTE_ETH_DEV_SRIOV(dev).active = pf->vf_num;
/* restore irq0 */
i40e_pf_enable_irq0(hw);
diff --git a/drivers/net/i40e/i40e_pf.h b/drivers/net/i40e/i40e_pf.h
index 244bac37..b4c22876 100644
--- a/drivers/net/i40e/i40e_pf.h
+++ b/drivers/net/i40e/i40e_pf.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -118,5 +118,7 @@ void i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev,
uint8_t *msg, uint16_t msglen);
int i40e_pf_host_init(struct rte_eth_dev *dev);
int i40e_pf_host_uninit(struct rte_eth_dev *dev);
+void i40e_notify_vf_link_status(struct rte_eth_dev *dev,
+ struct i40e_pf_vf *vf);
#endif /* _I40E_PF_H_ */
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1b25b2f2..351cb94d 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -50,6 +50,8 @@
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
+#include <rte_ip.h>
+#include <rte_net.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
@@ -59,7 +61,6 @@
#define DEFAULT_TX_RS_THRESH 32
#define DEFAULT_TX_FREE_THRESH 32
-#define I40E_MAX_PKT_TYPE 256
#define I40E_TX_MAX_BURST 32
@@ -73,12 +74,31 @@
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#ifdef RTE_LIBRTE_IEEE1588
+#define I40E_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define I40E_TX_IEEE1588_TMST 0
+#endif
+
#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
PKT_TX_OUTER_IP_CKSUM)
+#define I40E_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_TUNNEL_MASK | \
+ I40E_TX_IEEE1588_TMST)
+
+#define I40E_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+
static uint16_t i40e_xmit_pkts_simple(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
@@ -256,7 +276,7 @@ i40e_parse_tunneling_params(uint64_t ol_flags,
*cd_tunneling |= I40E_TXD_CTX_GRE_TUNNELING;
break;
default:
- PMD_TX_LOG(ERR, "Tunnel type not supported\n");
+ PMD_TX_LOG(ERR, "Tunnel type not supported");
return;
}
@@ -412,15 +432,6 @@ check_rx_burst_bulk_alloc_preconditions(__rte_unused struct i40e_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc < (I40E_MAX_RING_DESC -
- RTE_PMD_I40E_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "I40E_MAX_RING_DESC=%d, "
- "RTE_PMD_I40E_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, I40E_MAX_RING_DESC,
- RTE_PMD_I40E_RX_MAX_BURST);
- ret = -EINVAL;
}
#else
ret = -EINVAL;
@@ -446,6 +457,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
int32_t s[I40E_LOOK_AHEAD], nb_dd;
int32_t i, j, nb_rx = 0;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
rxdp = &rxq->rx_ring[rxq->rx_tail];
rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -494,9 +506,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
mb->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
mb->hash.rss = rte_le_to_cpu_32(\
rxdp[j].wb.qword0.hi_dword.rss);
@@ -584,7 +596,7 @@ i40e_rx_alloc_bufs(struct i40e_rx_queue *rxq)
/* Update rx tail regsiter */
rte_wmb();
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
+ I40E_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
(uint16_t)(rxq->rx_free_trigger + rxq->rx_free_thresh);
@@ -598,6 +610,7 @@ static inline uint16_t
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct i40e_rx_queue *rxq = (struct i40e_rx_queue *)rx_queue;
+ struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
@@ -615,9 +628,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
if (i40e_rx_alloc_bufs(rxq) != 0) {
uint16_t i, j;
- PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u",
- rxq->port_id, rxq->queue_id);
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed +=
+ rxq->rx_free_thresh;
+
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
@@ -679,6 +693,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union i40e_rx_desc rxd;
struct i40e_rx_entry *sw_ring;
struct i40e_rx_entry *rxe;
+ struct rte_eth_dev *dev;
struct rte_mbuf *rxm;
struct rte_mbuf *nmb;
uint16_t nb_rx;
@@ -688,6 +703,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t rx_id, nb_hold;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl;
nb_rx = 0;
nb_hold = 0;
@@ -695,6 +711,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
sw_ring = rxq->sw_ring;
+ ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -707,10 +724,13 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
- rxd = *rxdp;
+ }
+ rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
rx_id++;
@@ -751,8 +771,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
rxm->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
rxm->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -802,10 +822,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
struct rte_mbuf *nmb, *rxm;
uint16_t rx_id = rxq->rx_tail;
uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+ struct rte_eth_dev *dev;
uint32_t rx_status;
uint64_t qword1;
uint64_t dma_addr;
uint64_t pkt_flags;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -818,8 +840,12 @@ i40e_recv_scattered_pkts(void *rx_queue,
break;
nmb = rte_mbuf_raw_alloc(rxq->mp);
- if (unlikely(!nmb))
+ if (unlikely(!nmb)) {
+ dev = I40E_VSI_TO_ETH_DEV(rxq->vsi);
+ dev->data->rx_mbuf_alloc_failed++;
break;
+ }
+
rxd = *rxdp;
nb_hold++;
rxe = &sw_ring[rx_id];
@@ -913,8 +939,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
first_seg->packet_type =
- i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
- I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+ ptype_tbl[(uint8_t)((qword1 &
+ I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
if (pkt_flags & PKT_RX_RSS_HASH)
first_seg->hash.rss =
rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -1022,7 +1048,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
uint16_t nb_tx;
uint32_t td_cmd;
uint32_t td_offset;
- uint32_t tx_flags;
uint32_t td_tag;
uint64_t ol_flags;
uint16_t nb_used;
@@ -1046,7 +1071,6 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
td_cmd = 0;
td_tag = 0;
td_offset = 0;
- tx_flags = 0;
tx_pkt = *tx_pkts++;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
@@ -1093,12 +1117,8 @@ i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Descriptor based VLAN insertion */
if (ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
- tx_flags |= tx_pkt->vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
- tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
- td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ td_tag = tx_pkt->vlan_tci;
}
/* Always enable CRC offload insertion */
@@ -1231,7 +1251,7 @@ end_of_tx:
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- I40E_PCI_REG_WRITE(txq->qtx_tail, tx_id);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -1383,7 +1403,7 @@ tx_xmit_pkts(struct i40e_tx_queue *txq,
/* Update the tx tail register */
rte_wmb();
- I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
@@ -1414,6 +1434,85 @@ i40e_xmit_pkts_simple(void *tx_queue,
return nb_tx;
}
+static uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = i40e_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * m->nb_segs is uint8_t, so nb_segs is always less than
+ * I40E_TX_MAX_SEG.
+ * We only check the condition nb_segs > I40E_TX_MAX_MTU_SEG.
+ */
+ if (!(ol_flags & PKT_TX_TCP_SEG)) {
+ if (m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+ } else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
+ (m->tso_segsz > I40E_MAX_TSO_MSS)) {
+ /* An MSS outside the range (256B - 9674B) is considered
+ * malicious
+ */
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ if (ol_flags & I40E_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+ return i;
+}
+
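Note: i40e_prep_pkts() is wired up as dev->tx_pkt_prepare further down in this patch, so applications reach these checks through the generic rte_eth_tx_prepare() call. A minimal usage sketch, assuming port_id/queue_id name a configured i40e Tx queue and handle_bad_pkt() is an application-defined helper:

	uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	if (nb_ok != nb_pkts)
		/* pkts[nb_ok] failed one of the checks above; rte_errno says
		 * why (segment count, TSO MSS range or unsupported ol_flags).
		 */
		handle_bad_pkt(pkts[nb_ok], rte_errno);

	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);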
/*
* Find the VSI the queue belongs to. 'queue_idx' is the queue index
* application used, which assume having sequential ones. But from driver's
@@ -1701,8 +1800,17 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
/* Allocate the maximun number of RX ring hardware descriptor. */
- ring_size = sizeof(union i40e_rx_desc) * I40E_MAX_RING_DESC;
- ring_size = RTE_ALIGN(ring_size, I40E_DMA_MEM_ALIGN);
+ len = I40E_MAX_RING_DESC;
+
+ /**
+ * Allocating a little more memory because vectorized/bulk_alloc Rx
+ * functions don't check boundaries each time.
+ */
+ len += RTE_PMD_I40E_RX_MAX_BURST;
+
+ ring_size = RTE_ALIGN(len * sizeof(union i40e_rx_desc),
+ I40E_DMA_MEM_ALIGN);
+
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
ring_size, I40E_RING_BASE_ALIGN, socket_id);
if (!rz) {
@@ -1717,11 +1825,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
rxq->rx_ring = (union i40e_rx_desc *)rz->addr;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
len = (uint16_t)(nb_desc + RTE_PMD_I40E_RX_MAX_BURST);
-#else
- len = nb_desc;
-#endif
/* Allocate the software ring. */
rxq->sw_ring =
@@ -1796,11 +1900,6 @@ i40e_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct i40e_rx_queue *rxq;
uint16_t desc = 0;
- if (unlikely(rx_queue_id >= dev->data->nb_rx_queues)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
while ((desc < rxq->nb_rx_desc) &&
@@ -1831,7 +1930,7 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
int ret;
if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX queue id %u", offset);
+ PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
return 0;
}
@@ -1849,6 +1948,64 @@ i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
}
int
+i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ volatile uint64_t *status;
+ uint64_t mask;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.qword1.status_error_len;
+ mask = rte_le_to_cpu_64((1ULL << I40E_RX_DESC_STATUS_DD_SHIFT)
+ << I40E_RXD_QW1_STATUS_SHIFT);
+ if (*status & mask)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct i40e_tx_queue *txq = tx_queue;
+ volatile uint64_t *status;
+ uint64_t mask, expect;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].cmd_type_offset_bsz;
+ mask = rte_le_to_cpu_64(I40E_TXD_QW1_DTYPE_MASK);
+ expect = rte_cpu_to_le_64(
+ I40E_TX_DESC_DTYPE_DESC_DONE << I40E_TXD_QW1_DTYPE_SHIFT);
+ if ((*status & mask) == expect)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
+int
i40e_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
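Note: the two descriptor status helpers above back the generic rte_eth_rx_descriptor_status()/rte_eth_tx_descriptor_status() calls. A rough polling sketch, with port_id, the queue ids, offset and the stats counters as application-side placeholders:

	int st = rte_eth_rx_descriptor_status(port_id, rx_queue_id, offset);

	if (st == RTE_ETH_RX_DESC_DONE)
		stats.rx_backlog++;	/* a packet already waits at that slot */
	else if (st == RTE_ETH_RX_DESC_UNAVAIL)
		stats.rx_unarmed++;	/* descriptor not handed to HW yet */

	if (rte_eth_tx_descriptor_status(port_id, tx_queue_id, offset) ==
	    RTE_ETH_TX_DESC_FULL)
		stats.tx_backpressure++;	/* slot still holds an unsent packet */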
@@ -2129,11 +2286,11 @@ i40e_reset_rx_queue(struct i40e_rx_queue *rxq)
for (i = 0; i < len * sizeof(union i40e_rx_desc); i++)
((volatile char *)rxq->rx_ring)[i] = 0;
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
for (i = 0; i < RTE_PMD_I40E_RX_MAX_BURST; ++i)
rxq->sw_ring[rxq->nb_rx_desc + i].mbuf = &rxq->fake_mbuf;
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
rxq->rx_nb_avail = 0;
rxq->rx_next_avail = 0;
rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
@@ -2765,12 +2922,25 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts_simple;
}
+ dev->tx_pkt_prepare = NULL;
} else {
PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
}
}
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
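Note: replacing the i40e_rxd_pkt_type_mapping() call with a per-adapter ptype_tbl[] keeps the Rx hot path to a single array lookup and lets the table be rebuilt per device at init time. The lookup shared by all the Rx paths changed above boils down to (qword1 being the descriptor status/length qword):

	uint8_t hw_ptype = (uint8_t)((qword1 & I40E_RXD_QW1_PTYPE_MASK) >>
				     I40E_RXD_QW1_PTYPE_SHIFT);

	mb->packet_type = rxq->vsi->adapter->ptype_tbl[hw_ptype];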
/* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
int __attribute__((weak))
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
@@ -2815,9 +2985,9 @@ i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
}
uint16_t __attribute__((weak))
-i40e_xmit_pkts_vec(void __rte_unused *tx_queue,
- struct rte_mbuf __rte_unused **tx_pkts,
- uint16_t __rte_unused nb_pkts)
+i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
{
return 0;
}
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index ecdb13cb..20084d64 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -34,16 +34,6 @@
#ifndef _I40E_RXTX_H_
#define _I40E_RXTX_H_
-/**
- * 32 bits tx flags, high 16 bits for L2TAG1 (VLAN),
- * low 16 bits for others.
- */
-#define I40E_TX_FLAG_L2TAG1_SHIFT 16
-#define I40E_TX_FLAG_L2TAG1_MASK 0xffff0000
-#define I40E_TX_FLAG_CSUM ((uint32_t)(1 << 0))
-#define I40E_TX_FLAG_INSERT_VLAN ((uint32_t)(1 << 1))
-#define I40E_TX_FLAG_TSYN ((uint32_t)(1 << 2))
-
#define RTE_PMD_I40E_RX_MAX_BURST 32
#define RTE_PMD_I40E_TX_MAX_BURST 32
@@ -63,6 +53,12 @@
#define I40E_MIN_RING_DESC 64
#define I40E_MAX_RING_DESC 4096
+#define I40E_MIN_TSO_MSS 256
+#define I40E_MAX_TSO_MSS 9674
+
+#define I40E_TX_MAX_SEG UINT8_MAX
+#define I40E_TX_MAX_MTU_SEG 8
+
#undef container_of
#define container_of(ptr, type, member) ({ \
typeof(((type *)0)->member)(*__mptr) = (ptr); \
@@ -113,11 +109,11 @@ struct i40e_rx_queue {
uint16_t nb_rx_hold; /**< number of held free RX desc */
struct rte_mbuf *pkt_first_seg; /**< first segment of current packet */
struct rte_mbuf *pkt_last_seg; /**< last segment of current packet */
+ struct rte_mbuf fake_mbuf; /**< dummy mbuf */
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
uint16_t rx_nb_avail; /**< number of staged packets ready */
uint16_t rx_next_avail; /**< index of next staged packets */
uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
- struct rte_mbuf fake_mbuf; /**< dummy mbuf */
struct rte_mbuf *rx_stage[RTE_PMD_I40E_RX_MAX_BURST * 2];
#endif
@@ -223,6 +219,8 @@ uint16_t i40e_recv_scattered_pkts(void *rx_queue,
uint16_t i40e_xmit_pkts(void *tx_queue,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t i40e_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int i40e_tx_queue_init(struct i40e_tx_queue *txq);
int i40e_rx_queue_init(struct i40e_rx_queue *rxq);
void i40e_free_tx_resources(struct i40e_tx_queue *txq);
@@ -238,6 +236,8 @@ void i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq);
uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint16_t i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
@@ -248,19 +248,20 @@ int i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev);
int i40e_rxq_vec_setup(struct i40e_rx_queue *rxq);
int i40e_txq_vec_setup(struct i40e_tx_queue *txq);
void i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq);
-uint16_t i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
void i40e_set_rx_function(struct rte_eth_dev *dev);
void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
struct i40e_tx_queue *txq);
void i40e_set_tx_function(struct rte_eth_dev *dev);
+void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
/* For each value it means, datasheet of hardware can tell more details
*
* @note: fix i40e_dev_supported_ptypes_get() if any change here.
*/
static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
+i40e_get_default_pkt_type(uint8_t ptype)
{
static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
/* L2 types */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
new file mode 100644
index 00000000..f4036ea2
--- /dev/null
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -0,0 +1,645 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+
+#include "base/i40e_prototype.h"
+#include "base/i40e_type.h"
+#include "i40e_ethdev.h"
+#include "i40e_rxtx.h"
+#include "i40e_rxtx_vec_common.h"
+
+#include <altivec.h>
+
+#pragma GCC diagnostic ignored "-Wcast-qual"
+
+static inline void
+i40e_rxq_rearm(struct i40e_rx_queue *rxq)
+{
+ int i;
+ uint16_t rx_id;
+ volatile union i40e_rx_desc *rxdp;
+
+ struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
+ struct rte_mbuf *mb0, *mb1;
+
+ vector unsigned long hdr_room = (vector unsigned long){
+ RTE_PKTMBUF_HEADROOM,
+ RTE_PKTMBUF_HEADROOM};
+ vector unsigned long dma_addr0, dma_addr1;
+
+ rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+ /* Pull 'n' more MBUFs into the software ring */
+ if (rte_mempool_get_bulk(rxq->mp,
+ (void *)rxep,
+ RTE_I40E_RXQ_REARM_THRESH) < 0) {
+ if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
+ rxq->nb_rx_desc) {
+ dma_addr0 = (vector unsigned long){};
+ for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
+ rxep[i].mbuf = &rxq->fake_mbuf;
+ vec_st(dma_addr0, 0,
+ (vector unsigned long *)&rxdp[i].read);
+ }
+ }
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+ RTE_I40E_RXQ_REARM_THRESH;
+ return;
+ }
+
+ /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+ for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+ vector unsigned long vaddr0, vaddr1;
+ uintptr_t p0, p1;
+
+ mb0 = rxep[0].mbuf;
+ mb1 = rxep[1].mbuf;
+
+ /* Flush mbuf with pkt template.
+ * Data to be rearmed is 6 bytes long.
+ * Though, RX will overwrite ol_flags that are coming next
+ * anyway. So overwrite whole 8 bytes with one load:
+ * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
+ */
+ p0 = (uintptr_t)&mb0->rearm_data;
+ *(uint64_t *)p0 = rxq->mbuf_initializer;
+ p1 = (uintptr_t)&mb1->rearm_data;
+ *(uint64_t *)p1 = rxq->mbuf_initializer;
+
+ /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
+ vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
+ vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
+
+ /* convert pa to dma_addr hdr/data */
+ dma_addr0 = vec_mergel(vaddr0, vaddr0);
+ dma_addr1 = vec_mergel(vaddr1, vaddr1);
+
+ /* add headroom to pa values */
+ dma_addr0 = vec_add(dma_addr0, hdr_room);
+ dma_addr1 = vec_add(dma_addr1, hdr_room);
+
+ /* flush desc with pa dma_addr */
+ vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
+ vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
+ }
+
+ rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
+ if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+ rxq->rxrearm_start = 0;
+
+ rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
+
+ rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+ (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+ /* Update the tail pointer on the NIC */
+ I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+}
+
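Note: per mbuf, the vec_mergel()/vec_add() pair in i40e_rxq_rearm() is the vector form of the usual rearm computation; a scalar sketch of what each descriptor ends up holding (no endian conversion shown, matching the vector store above):

	uint64_t dma = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;

	rxdp->read.pkt_addr = dma;	/* where the NIC DMAs the packet data */
	rxdp->read.hdr_addr = dma;	/* duplicated; header split is unused here */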
+static inline void
+desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+{
+ vector unsigned int vlan0, vlan1, rss, l3_l4e;
+
+ /* mask everything except RSS, flow director and VLAN flags
+ * bit2 is for VLAN tag, bit11 for flow director indication
+ * bit13:12 for RSS indication.
+ */
+ const vector unsigned int rss_vlan_msk = (vector unsigned int){
+ (int32_t)0x1c03804, (int32_t)0x1c03804,
+ (int32_t)0x1c03804, (int32_t)0x1c03804};
+
+ /* map rss and vlan type to rss hash and vlan flag */
+ const vector unsigned char vlan_flags = (vector unsigned char){
+ 0, 0, 0, 0,
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char rss_flags = (vector unsigned char){
+ 0, PKT_RX_FDIR, 0, 0,
+ 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0};
+
+ const vector unsigned char l3_l4e_flags = (vector unsigned char){
+ 0,
+ PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
+ PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ | PKT_RX_IP_CKSUM_BAD,
+ 0, 0, 0, 0, 0, 0, 0, 0};
+
+ vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
+ vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
+ vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
+
+ vlan1 = vec_and(vlan0, rss_vlan_msk);
+ vlan0 = (vector unsigned int)vec_perm(vlan_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&vlan1);
+
+ rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
+ rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
+ *(vector unsigned char *)&rss);
+
+ l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
+ l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
+ (vector unsigned char){},
+ *(vector unsigned char *)&l3_l4e);
+
+ vlan0 = vec_or(vlan0, rss);
+ vlan0 = vec_or(vlan0, l3_l4e);
+
+ rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
+ rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
+ rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
+ rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
+}
+
+#define PKTLEN_SHIFT 10
+
+static inline void
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
+{
+ vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
+ vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
+
+ ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
+ ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
+
+ rx_pkts[0]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+ rx_pkts[1]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+ rx_pkts[2]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+ rx_pkts[3]->packet_type =
+ ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts, uint8_t *split_packet)
+{
+ volatile union i40e_rx_desc *rxdp;
+ struct i40e_rx_entry *sw_ring;
+ uint16_t nb_pkts_recd;
+ int pos;
+ uint64_t var;
+ vector unsigned char shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+ vector unsigned short crc_adjust = (vector unsigned short){
+ 0, 0, /* ignore pkt_type field */
+ rxq->crc_len, /* sub crc on pkt_len */
+ 0, /* ignore high-16bits of pkt_len */
+ rxq->crc_len, /* sub crc on data_len */
+ 0, 0, 0 /* ignore non-length fields */
+ };
+ vector unsigned long dd_check, eop_check;
+
+ /* nb_pkts shall be less than or equal to RTE_I40E_MAX_RX_BURST */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
+
+ /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
+
+ /* Just the act of getting into the function from the application is
+ * going to cost about 7 cycles
+ */
+ rxdp = rxq->rx_ring + rxq->rx_tail;
+
+ rte_prefetch0(rxdp);
+
+ /* See if we need to rearm the RX queue - gives the prefetch a bit
+ * of time to act
+ */
+ if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
+ i40e_rxq_rearm(rxq);
+
+ /* Before we start moving massive data around, check to see if
+ * there is actually a packet available
+ */
+ if (!(rxdp->wb.qword1.status_error_len &
+ rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
+ return 0;
+
+ /* 4 packets DD mask */
+ dd_check = (vector unsigned long){0x0000000100000001ULL,
+ 0x0000000100000001ULL};
+
+ /* 4 packets EOP mask */
+ eop_check = (vector unsigned long){0x0000000200000002ULL,
+ 0x0000000200000002ULL};
+
+ /* mask to shuffle from desc. to mbuf */
+ shuf_msk = (vector unsigned char){
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 0xFF, 0xFF, /* pkt_type set as unknown */
+ 14, 15, /* octet 15~14, low 16 bits pkt_len */
+ 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
+ 14, 15, /* octet 15~14, 16 bits data_len */
+ 2, 3, /* octet 2~3, low 16 bits vlan_macip */
+ 4, 5, 6, 7 /* octet 4~7, 32bits rss */
+ };
+
+ /* Cache is empty -> need to scan the buffer rings, but first move
+ * the next 'n' mbufs into the cache
+ */
+ sw_ring = &rxq->sw_ring[rxq->rx_tail];
+
+ /* A. load 4 packet in one loop
+ * [A*. mask out 4 unused dirty field in desc]
+ * B. copy 4 mbuf point from swring to rx_pkts
+ * C. calc the number of DD bits among the 4 packets
+ * [C*. extract the end-of-packet bit, if requested]
+ * D. fill info. from desc to mbuf
+ */
+
+ for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+ pos += RTE_I40E_DESCS_PER_LOOP,
+ rxdp += RTE_I40E_DESCS_PER_LOOP) {
+ vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
+ vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
+ vector unsigned long mbp1, mbp2; /* two mbuf pointer
+ * in one XMM reg.
+ */
+
+ /* B.1 load 1 mbuf point */
+ mbp1 = *(vector unsigned long *)&sw_ring[pos];
+ /* Read desc statuses backwards to avoid race condition */
+ /* A.1 load 4 pkts desc */
+ descs[3] = *(vector unsigned long *)(rxdp + 3);
+ rte_compiler_barrier();
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos] = mbp1;
+
+ /* B.1 load 1 mbuf point */
+ mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
+
+ descs[2] = *(vector unsigned long *)(rxdp + 2);
+ rte_compiler_barrier();
+ /* B.1 load 2 mbuf point */
+ descs[1] = *(vector unsigned long *)(rxdp + 1);
+ rte_compiler_barrier();
+ descs[0] = *(vector unsigned long *)(rxdp);
+
+ /* B.2 copy 2 mbuf point into rx_pkts */
+ *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
+
+ if (split_packet) {
+ rte_mbuf_prefetch_part2(rx_pkts[pos]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+ rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
+ }
+
+ /* avoid compiler reorder optimization */
+ rte_compiler_barrier();
+
+ /* pkt 3,4 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len3 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[3]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ const vector unsigned int len2 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[2]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[3] = (vector unsigned long)len3;
+ descs[2] = (vector unsigned long)len2;
+
+ /* D.1 pkt 3,4 convert format from desc to pktmbuf */
+ pkt_mb4 = vec_perm((vector unsigned char)descs[3],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb3 = vec_perm((vector unsigned char)descs[2],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
+ (vector unsigned short)descs[2]);
+ /* C.1 4=>2 filter staterr info only */
+ sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
+ (vector unsigned short)descs[0]);
+ /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
+ pkt_mb4 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb4, crc_adjust);
+ pkt_mb3 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb3, crc_adjust);
+
+ /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/
+ const vector unsigned int len1 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[1]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+ const vector unsigned int len0 = vec_sl(
+ vec_ld(0, (vector unsigned int *)&descs[0]),
+ (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
+
+ /* merge the now-aligned packet length fields back in */
+ descs[1] = (vector unsigned long)len1;
+ descs[0] = (vector unsigned long)len0;
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb2 = vec_perm((vector unsigned char)descs[1],
+ (vector unsigned char){}, shuf_msk);
+ pkt_mb1 = vec_perm((vector unsigned char)descs[0],
+ (vector unsigned char){}, shuf_msk);
+
+ /* C.2 get 4 pkts staterr value */
+ staterr = (vector unsigned short)vec_mergeh(
+ sterr_tmp1, sterr_tmp2);
+
+ /* D.3 copy final 3,4 data to rx_pkts */
+ vec_st(pkt_mb4, 0,
+ (vector unsigned char *)&rx_pkts[pos + 3]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb3, 0,
+ (vector unsigned char *)&rx_pkts[pos + 2]
+ ->rx_descriptor_fields1
+ );
+
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb2 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb2, crc_adjust);
+ pkt_mb1 = (vector unsigned char)vec_sub(
+ (vector unsigned short)pkt_mb1, crc_adjust);
+
+ /* C* extract and record EOP bit */
+ if (split_packet) {
+ vector unsigned char eop_shuf_mask =
+ (vector unsigned char){
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x04, 0x0C, 0x00, 0x08
+ };
+
+ /* and with mask to extract bits, flipping 1-0 */
+ vector unsigned char eop_bits = vec_and(
+ (vector unsigned char)vec_nor(staterr, staterr),
+ (vector unsigned char)eop_check);
+ /* the staterr values are not in order, as the count
+ * of dd bits doesn't care. However, for end of
+ * packet tracking, we do care, so shuffle. This also
+ * compresses the 32-bit values to 8-bit
+ */
+ eop_bits = vec_perm(eop_bits, (vector unsigned char){},
+ eop_shuf_mask);
+ /* store the resulting 32-bit value */
+ *split_packet = (vec_ld(0,
+ (vector unsigned int *)&eop_bits))[0];
+ split_packet += RTE_I40E_DESCS_PER_LOOP;
+
+ /* zero-out next pointers */
+ rx_pkts[pos]->next = NULL;
+ rx_pkts[pos + 1]->next = NULL;
+ rx_pkts[pos + 2]->next = NULL;
+ rx_pkts[pos + 3]->next = NULL;
+ }
+
+ /* C.3 calc available number of desc */
+ staterr = vec_and(staterr, (vector unsigned short)dd_check);
+
+ /* D.3 copy final 1,2 data to rx_pkts */
+ vec_st(pkt_mb2, 0,
+ (vector unsigned char *)&rx_pkts[pos + 1]
+ ->rx_descriptor_fields1
+ );
+ vec_st(pkt_mb1, 0,
+ (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
+ );
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+ desc_to_olflags_v(descs, &rx_pkts[pos]);
+
+ /* C.4 calc available number of desc */
+ var = __builtin_popcountll((vec_ld(0,
+ (vector unsigned long *)&staterr)[0]));
+ nb_pkts_recd += var;
+ if (likely(var != RTE_I40E_DESCS_PER_LOOP))
+ break;
+ }
+
+ /* Update our internal tail pointer */
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+ rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+ rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+
+ return nb_pkts_recd;
+}
+
+ /* Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ * numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_rx_queue *rxq = rx_queue;
+ uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+ /* get some new buffers */
+ uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+ split_flags);
+ if (nb_bufs == 0)
+ return 0;
+
+ /* happy day case, full burst + no packets to be joined */
+ const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
+ if (rxq->pkt_first_seg == NULL &&
+ split_fl64[0] == 0 && split_fl64[1] == 0 &&
+ split_fl64[2] == 0 && split_fl64[3] == 0)
+ return nb_bufs;
+
+ /* reassemble any packets that need reassembly*/
+ unsigned int i = 0;
+
+ if (!rxq->pkt_first_seg) {
+ /* find the first split flag, and only reassemble then*/
+ while (i < nb_bufs && !split_flags[i])
+ i++;
+ if (i == nb_bufs)
+ return nb_bufs;
+ }
+ return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+ &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf *pkt, uint64_t flags)
+{
+ uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+ ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
+ ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
+ vector unsigned long descriptor = (vector unsigned long){
+ pkt->buf_physaddr + pkt->data_off, high_qw};
+ *(vector unsigned long *)txdp = descriptor;
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+ struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
+{
+ int i;
+
+ for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+ vtx1(txdp, *pkt, flags);
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+ volatile struct i40e_tx_desc *txdp;
+ struct i40e_tx_entry *txep;
+ uint16_t n, nb_commit, tx_id;
+ uint64_t flags = I40E_TD_CMD;
+ uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+ int i;
+
+ /* crossing tx_rs_thresh boundary is not allowed */
+ nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+ if (txq->nb_tx_free < txq->tx_free_thresh)
+ i40e_tx_free_bufs(txq);
+
+ nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+ nb_commit = nb_pkts;
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ tx_id = txq->tx_tail;
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+
+ txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+ n = (uint16_t)(txq->nb_tx_desc - tx_id);
+ if (nb_commit >= n) {
+ tx_backlog_entry(txep, tx_pkts, n);
+
+ for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+ vtx1(txdp, *tx_pkts, flags);
+
+ vtx1(txdp, *tx_pkts++, rs);
+
+ nb_commit = (uint16_t)(nb_commit - n);
+
+ tx_id = 0;
+ txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+ /* avoid reaching the end of the ring */
+ txdp = &txq->tx_ring[tx_id];
+ txep = &txq->sw_ring[tx_id];
+ }
+
+ tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+ vtx(txdp, tx_pkts, nb_commit, flags);
+
+ tx_id = (uint16_t)(tx_id + nb_commit);
+ if (tx_id > txq->tx_next_rs) {
+ txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+ rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+ I40E_TXD_QW1_CMD_SHIFT);
+ txq->tx_next_rs =
+ (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+ }
+
+ txq->tx_tail = tx_id;
+
+ I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+ return nb_pkts;
+}
+
+void __attribute__((cold))
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
+{
+ _i40e_rx_queue_release_mbufs_vec(rxq);
+}
+
+int __attribute__((cold))
+i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
+{
+ return i40e_rxq_vec_setup_default(rxq);
+}
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
+{
+ return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+ return i40e_rx_vec_dev_conf_condition_check_default(dev);
+}
diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h
index 990520f3..69209668 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_common.h
+++ b/drivers/net/i40e/i40e_rxtx_vec_common.h
@@ -65,9 +65,9 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
start->ol_flags = end->ol_flags;
/* we need to strip crc for the whole packet */
start->pkt_len -= rxq->crc_len;
- if (end->data_len > rxq->crc_len) {
+ if (end->data_len > rxq->crc_len)
end->data_len -= rxq->crc_len;
- } else {
+ else {
/* free up last mbuf */
struct rte_mbuf *secondlast = start;
@@ -78,7 +78,6 @@ reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
end->data_len);
secondlast->next = NULL;
rte_pktmbuf_free_seg(end);
- end = secondlast;
}
pkts[pkt_idx++] = start;
start = end = NULL;
@@ -124,12 +123,12 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool)) {
free[nb_free++] = m;
@@ -145,7 +144,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -225,14 +224,6 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0 ||
- rxmode->hw_ip_checksum != 0)
- return -1;
-#endif
-
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
@@ -243,6 +234,10 @@ i40e_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
if (rxmode->header_split == 1)
return -1;
+ /* no QinQ support */
+ if (rxmode->hw_vlan_extend == 1)
+ return -1;
+
return 0;
#else
RTE_SET_USED(dev);
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index 011c54e0..694e91f3 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -57,7 +57,6 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
uint64x2_t dma_addr0, dma_addr1;
uint64x2_t zero = vdupq_n_u64(0);
uint64_t paddr;
- uint8x8_t p;
rxdp = rxq->rx_ring + rxq->rxrearm_start;
@@ -77,27 +76,17 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
return;
}
- p = vld1_u8((uint8_t *)&rxq->mbuf_initializer);
-
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
dma_addr0 = vdupq_n_u64(paddr);
/* flush desc with pa dma_addr */
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
- vst1_u8((uint8_t *)&mb1->rearm_data, p);
paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM;
dma_addr1 = vdupq_n_u64(paddr);
vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
@@ -116,18 +105,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
+ struct rte_mbuf **rx_pkts)
{
uint32x4_t vlan0, vlan1, rss, l3_l4e;
+ const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
+ uint64x2_t rearm0, rearm1, rearm2, rearm3;
/* mask everything except RSS, flow director and VLAN flags
* bit2 is for VLAN tag, bit11 for flow director indication
@@ -136,6 +120,20 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
const uint32x4_t rss_vlan_msk = {
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+ const uint32x4_t cksum_mask = {
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_EIP_CKSUM_BAD};
+
/* map rss and vlan type to rss hash and vlan flag */
const uint8x16_t vlan_flags = {
0, 0, 0, 0,
@@ -150,14 +148,16 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
0, 0, 0, 0};
const uint8x16_t l3_l4e_flags = {
- 0,
- PKT_RX_IP_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD,
- PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
+ PKT_RX_IP_CKSUM_BAD >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD) >> 1,
+ (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
0, 0, 0, 0, 0, 0, 0, 0};
vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
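Note: the rewritten l3_l4e_flags entries are stored pre-shifted right by one because a vqtbl1q_u8() table lane is only 8 bits wide while PKT_RX_L4_CKSUM_GOOD sits above bit 7; the vshlq_n_u32()/vandq_u32() pair added in the next hunk undoes the shift and drops the aliased bits. In scalar terms (flags and err_bits are illustrative variables):

	flags = l3_l4e_flags[err_bits];	/* 8-bit table lookup (vqtbl1q_u8) */
	flags <<= 1;			/* restore the real PKT_RX_* bit positions */
	flags &= cksum_mask;		/* keep only the checksum-related flags */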
@@ -177,26 +177,32 @@ desc_to_olflags_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
l3_l4e = vshrq_n_u32(vlan1, 22);
l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
vreinterpretq_u8_u32(l3_l4e)));
-
+ /* then we shift left 1 bit */
+ l3_l4e = vshlq_n_u32(l3_l4e, 1);
+ /* we need to mask out the redundant bits */
+ l3_l4e = vandq_u32(l3_l4e, cksum_mask);
vlan0 = vorrq_u32(vlan0, rss);
vlan0 = vorrq_u32(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = vgetq_lane_u32(vlan0, 0);
- rx_pkts[1]->ol_flags = vgetq_lane_u32(vlan0, 1);
- rx_pkts[2]->ol_flags = vgetq_lane_u32(vlan0, 2);
- rx_pkts[3]->ol_flags = vgetq_lane_u32(vlan0, 3);
+ rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
+ rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
+ rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
+ rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);
+
+ vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
+ vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
+ vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
+ vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
}
-#else
-#define desc_to_olflags_v(descs, rx_pkts) do {} while (0)
-#endif
#define PKTLEN_SHIFT 10
#define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
static inline void
-desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
int i;
uint8_t ptype;
@@ -205,7 +211,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
for (i = 0; i < 4; i++) {
tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
ptype = vgetq_lane_u8(tmp, 8);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+ rx_pkts[i]->packet_type = ptype_tbl[ptype];
}
}
@@ -225,6 +231,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
/* mask to shuffle from desc. to mbuf */
uint8x16_t shuf_msk = {
@@ -359,7 +366,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp2.val[1]).val[0];
stat = vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
@@ -429,7 +436,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
nb_pkts_recd += var;
@@ -523,8 +530,8 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index b95cc8e1..3b4a352e 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -82,22 +82,10 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /* Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -128,17 +116,13 @@ i40e_rxq_rearm(struct i40e_rx_queue *rxq)
I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
+ struct rte_mbuf **rx_pkts)
{
+ const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+ __m128i rearm0, rearm1, rearm2, rearm3;
+
__m128i vlan0, vlan1, rss, l3_l4e;
/* mask everything except RSS, flow director and VLAN flags
@@ -206,19 +190,30 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
vlan0 = _mm_or_si128(vlan0, rss);
vlan0 = _mm_or_si128(vlan0, l3_l4e);
- rx_pkts[0]->ol_flags = _mm_extract_epi16(vlan0, 0);
- rx_pkts[1]->ol_flags = _mm_extract_epi16(vlan0, 2);
- rx_pkts[2]->ol_flags = _mm_extract_epi16(vlan0, 4);
- rx_pkts[3]->ol_flags = _mm_extract_epi16(vlan0, 6);
+ /*
+ * At this point, we have the 4 sets of flags in the low 16-bits
+ * of each 32-bit value in vlan0.
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vlan0, 4), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, vlan0, 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(vlan0, 4), 0x10);
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
-#else
-#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
-#endif
#define PKTLEN_SHIFT 10
static inline void
-desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+ uint32_t *ptype_tbl)
{
__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
@@ -226,10 +221,10 @@ desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
ptype0 = _mm_srli_epi64(ptype0, 30);
ptype1 = _mm_srli_epi64(ptype1, 30);
- rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
- rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
- rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
- rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+ rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
+ rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
+ rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
+ rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
}
/*
@@ -248,6 +243,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
int pos;
uint64_t var;
__m128i shuf_msk;
+ uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16(
0, 0, 0, /* ignore non-length fields */
@@ -320,20 +316,26 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_I40E_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
/* A.1 load 4 pkts desc */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -342,8 +344,10 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -372,7 +376,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* C.1 4=>2 filter staterr info only */
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
- desc_to_olflags_v(descs, &rx_pkts[pos]);
+ desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -424,12 +428,6 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_I40E_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
@@ -441,7 +439,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
pkt_mb2);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
- desc_to_ptype_v(descs, &rx_pkts[pos]);
+ desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc avaialbe number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
@@ -536,8 +534,8 @@ vtx(volatile struct i40e_tx_desc *txdp,
}
uint16_t
-i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+i40e_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
volatile struct i40e_tx_desc *txdp;
diff --git a/drivers/net/i40e/rte_pmd_i40e.c b/drivers/net/i40e/rte_pmd_i40e.c
new file mode 100644
index 00000000..f7ce62bb
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.c
@@ -0,0 +1,1937 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "base/i40e_prototype.h"
+#include "i40e_ethdev.h"
+#include "i40e_pf.h"
+#include "i40e_rxtx.h"
+#include "rte_pmd_i40e.h"
+
+/* The max bandwidth of i40e is 40Gbps. */
+#define I40E_QOS_BW_MAX 40000
+/* The bandwidth should be the multiple of 50Mbps. */
+#define I40E_QOS_BW_GRANULARITY 50
+/* The min bandwidth weight is 1. */
+#define I40E_QOS_BW_WEIGHT_MIN 1
+/* The max bandwidth weight is 127. */
+#define I40E_QOS_BW_WEIGHT_MAX 127
+
+int
+rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ i40e_notify_vf_link_status(dev, &pf->vfs[vf]);
+
+ return 0;
+}
+
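Note: rte_pmd_i40e_ping_vfs() simply resends the PF's link status notification to the given VF; a typical use is refreshing a VF whose link state went stale after a PF event. Sketch, with port and vf_id chosen by the application:

	int ret = rte_pmd_i40e_ping_vfs(port, vf_id);

	if (ret == -EINVAL)
		printf("no such VF %u on port %u\n",
		       (unsigned)vf_id, (unsigned)port);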
+int
+rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SECURITY_VALID)) {
+ if (on) {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) ==
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.sec_flags &
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_add_rm_all_vlan_filter(struct i40e_vsi *vsi, uint8_t add)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
+ struct i40e_aqc_add_remove_vlan_element_data vlan_data = {0};
+ int ret;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id);
+ if (add)
+ ret = i40e_aq_add_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ else
+ ret = i40e_aq_remove_vlan(hw, vsi->seid,
+ &vlan_data, 1, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR,
+ "Failed to add/rm vlan filter");
+ return ret;
+ }
+ }
+ }
+
+ return I40E_SUCCESS;
+}
+
+int
+rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->vlan_anti_spoof_on == on)
+ return 0; /* already on or off */
+
+ vsi->vlan_anti_spoof_on = on;
+ if (!vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, on);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters.");
+ return -ENOTSUP;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (on)
+ vsi->info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+ else
+ vsi->info.sec_flags &= ~I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num;
+ enum rte_mac_filter_type filter_type;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* remove all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ vlan_num = vsi->vlan_num;
+ filter_type = f->mac_info.filter_type;
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ if (vlan_num == 0) {
+ PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0");
+ return I40E_ERR_PARAM;
+ }
+ } else if (filter_type == RTE_MAC_PERFECT_MATCH ||
+ filter_type == RTE_MAC_HASH_MATCH)
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+ if (filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_remove_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_restore_mac_filter(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ struct i40e_macvlan_filter *mv_f;
+ int i, vlan_num = 0;
+ int ret = I40E_SUCCESS;
+ void *temp;
+
+ /* restore all the MACs */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp) {
+ if ((f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH)) {
+ /**
+ * If vlan_num is 0, that's the first time to add mac,
+ * set mask for vlan_id 0.
+ */
+ if (vsi->vlan_num == 0) {
+ i40e_set_vlan_filter(vsi, 0, 1);
+ vsi->vlan_num = 1;
+ }
+ vlan_num = vsi->vlan_num;
+ } else if ((f->mac_info.filter_type == RTE_MAC_PERFECT_MATCH) ||
+ (f->mac_info.filter_type == RTE_MAC_HASH_MATCH))
+ vlan_num = 1;
+
+ mv_f = rte_zmalloc("macvlan_data", vlan_num * sizeof(*mv_f), 0);
+ if (!mv_f) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ for (i = 0; i < vlan_num; i++) {
+ mv_f[i].filter_type = f->mac_info.filter_type;
+ (void)rte_memcpy(&mv_f[i].macaddr,
+ &f->mac_info.mac_addr,
+ ETH_ADDR_LEN);
+ }
+
+ if (f->mac_info.filter_type == RTE_MACVLAN_PERFECT_MATCH ||
+ f->mac_info.filter_type == RTE_MACVLAN_HASH_MATCH) {
+ ret = i40e_find_all_vlan_for_mac(vsi, mv_f, vlan_num,
+ &f->mac_info.mac_addr);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+ }
+
+ ret = i40e_add_macvlan_filters(vsi, mv_f, vlan_num);
+ if (ret != I40E_SUCCESS) {
+ rte_free(mv_f);
+ return ret;
+ }
+
+ rte_free(mv_f);
+ ret = I40E_SUCCESS;
+ }
+
+ return ret;
+}
+
+static int
+i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on)
+{
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw;
+ int ret;
+
+ if (!vsi)
+ return -EINVAL;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return -ENOTSUP;
+ }
+
+ /* Check if it has been already on or off */
+ if (vsi->info.valid_sections &
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID)) {
+ if (on) {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) ==
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB)
+ return 0; /* already on */
+ } else {
+ if ((vsi->info.switch_id &
+ I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) == 0)
+ return 0; /* already off */
+ }
+ }
+
+ /* remove all the MAC and VLAN first */
+ ret = i40e_vsi_rm_mac_filter(vsi);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove MAC filters.");
+ return ret;
+ }
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 0);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Failed to remove VLAN filters.");
+ return ret;
+ }
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ if (on)
+ vsi->info.switch_id |= I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+ else
+ vsi->info.switch_id &= ~I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ return ret;
+ }
+
+ /* add all the MAC and VLAN back */
+ ret = i40e_vsi_restore_mac_filter(vsi);
+ if (ret)
+ return ret;
+ if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) {
+ ret = i40e_add_rm_all_vlan_filter(vsi, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ uint16_t vf_id;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ /* setup PF TX loopback */
+ vsi = pf->main_vsi;
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+
+ /* setup TX loopback for all the VFs */
+ if (!pf->vfs) {
+ /* if no VF, do nothing. */
+ return 0;
+ }
+
+ for (vf_id = 0; vf_id < pf->vf_num; vf_id++) {
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+
+ ret = i40e_vsi_set_tx_loopback(vsi, on);
+ if (ret)
+ return -ENOTSUP;
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ on, NULL, true);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set unicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ on, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set multicast promiscuous mode");
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr)
+{
+ struct i40e_mac_filter *f;
+ struct rte_eth_dev *dev;
+ struct i40e_pf_vf *vf;
+ struct i40e_vsi *vsi;
+ struct i40e_pf *pf;
+ void *temp;
+
+ if (i40e_validate_mac_addr((u8 *)mac_addr) != I40E_SUCCESS)
+ return -EINVAL;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs)
+ return -EINVAL;
+
+ vf = &pf->vfs[vf_id];
+ vsi = vf->vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mac_addr, &vf->mac_addr);
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH_SAFE(f, &vsi->mac_list, next, temp)
+ i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+
+ return 0;
+}
+
+/* Set vlan strip on/off for specific VF from host */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid argument.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+
+ if (!vsi)
+ return -EINVAL;
+
+ ret = i40e_vsi_config_vlan_stripping(vsi, !!on);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VLAN stripping!");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (vlan_id > ETHER_MAX_VLAN_ID) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0)
+ return -ENODEV;
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi->info.pvid = vlan_id;
+ if (vlan_id > 0)
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_INSERT_PVID;
+ else
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_INSERT_PVID;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_mac_filter_info filter;
+ struct ether_addr broadcast = {
+ .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} };
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (on) {
+ (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN);
+ filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ ret = i40e_vsi_add_mac(vsi, &filter);
+ } else {
+ ret = i40e_vsi_delete_mac(vsi, &broadcast);
+ }
+
+ if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VSI broadcast");
+ } else {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ if (on) {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ } else {
+ vsi->info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_UNTAGGED;
+ vsi->info.port_vlan_flags &= ~I40E_AQ_VSI_PVLAN_MODE_TAGGED;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ (void)rte_memcpy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+ ctxt.seid = vsi->seid;
+
+ hw = I40E_VSI_TO_HW(vsi);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to update VSI params");
+ }
+
+ return ret;
+}
+
+static int
+i40e_vlan_filter_count(struct i40e_vsi *vsi)
+{
+ uint32_t j, k;
+ uint16_t vlan_id;
+ int count = 0;
+
+ for (j = 0; j < I40E_VFTA_SIZE; j++) {
+ if (!vsi->vfta[j])
+ continue;
+
+ for (k = 0; k < I40E_UINT32_BIT_SIZE; k++) {
+ if (!(vsi->vfta[j] & (1 << k)))
+ continue;
+
+ vlan_id = j * I40E_UINT32_BIT_SIZE + k;
+ if (!vlan_id)
+ continue;
+
+ count++;
+ }
+ }
+
+ return count;
+}
+
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_hw *hw;
+ struct i40e_vsi *vsi;
+ uint16_t vf_idx;
+ int ret = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID || !vlan_id) {
+ PMD_DRV_LOG(ERR, "Invalid VLAN ID.");
+ return -EINVAL;
+ }
+
+ if (vf_mask == 0) {
+ PMD_DRV_LOG(ERR, "No VF.");
+ return -EINVAL;
+ }
+
+ if (on > 1) {
+ PMD_DRV_LOG(ERR, "on is should be 0 or 1.");
+ return -EINVAL;
+ }
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+ hw = I40E_PF_TO_HW(pf);
+
+ /**
+ * return -ENODEV if SRIOV not enabled, VF number not configured
+ * or no queue assigned.
+ */
+ if (!hw->func_caps.sr_iov_1_1 || pf->vf_num == 0 ||
+ pf->vf_nb_qps == 0) {
+ PMD_DRV_LOG(ERR, "SRIOV is not enabled or no queue.");
+ return -ENODEV;
+ }
+
+ for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ vsi = pf->vfs[vf_idx].vsi;
+ if (on) {
+ if (!vsi->vlan_filter_on) {
+ vsi->vlan_filter_on = true;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ false,
+ NULL);
+ if (!vsi->vlan_anti_spoof_on)
+ i40e_add_rm_all_vlan_filter(
+ vsi, true);
+ }
+ ret = i40e_vsi_add_vlan(vsi, vlan_id);
+ } else {
+ ret = i40e_vsi_delete_vlan(vsi, vlan_id);
+
+ if (!i40e_vlan_filter_count(vsi)) {
+ vsi->vlan_filter_on = false;
+ i40e_aq_set_vsi_vlan_promisc(hw,
+ vsi->seid,
+ true,
+ NULL);
+ }
+ }
+ }
+ }
+
+ if (ret != I40E_SUCCESS) {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Failed to set VF VLAN filter, on = %d", on);
+ }
+
+ return ret;
+}
+
+int
+rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ i40e_update_vsi_stats(vsi);
+
+ stats->ipackets = vsi->eth_stats.rx_unicast +
+ vsi->eth_stats.rx_multicast +
+ vsi->eth_stats.rx_broadcast;
+ stats->opackets = vsi->eth_stats.tx_unicast +
+ vsi->eth_stats.tx_multicast +
+ vsi->eth_stats.tx_broadcast;
+ stats->ibytes = vsi->eth_stats.rx_bytes;
+ stats->obytes = vsi->eth_stats.tx_bytes;
+ stats->ierrors = vsi->eth_stats.rx_discards;
+ stats->oerrors = vsi->eth_stats.tx_errors + vsi->eth_stats.tx_discards;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ vsi->offset_loaded = false;
+ i40e_update_vsi_stats(vsi);
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (bw > I40E_QOS_BW_MAX) {
+ PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+ I40E_QOS_BW_MAX);
+ return -EINVAL;
+ }
+
+ if (bw % I40E_QOS_BW_GRANULARITY) {
+ PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
+ I40E_QOS_BW_GRANULARITY);
+ return -EINVAL;
+ }
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_limit) {
+ PMD_DRV_LOG(INFO,
+ "No change for VF max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, quit if TC bandwidth limitation is enabled.
+ *
+ * If bw is 0, it means bandwidth limitation is disabled, so there is
+ * no need to check the TC bandwidth limitation.
+ */
+ if (bw) {
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if ((vsi->enabled_tc & BIT_ULL(i)) &&
+ vsi->bw_info.bw_ets_credits[i])
+ break;
+ }
+ if (i != I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR,
+ "TC max bandwidth has been set on this VF,"
+ " please disable it first.");
+ return -EINVAL;
+ }
+ }
+
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d bandwidth, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_limit = (uint16_t)bw;
+ vsi->bw_info.bw_max = 0;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id,
+ uint8_t tc_num, uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_tc_bw_data tc_bw;
+ int ret = 0;
+ int i, j;
+ uint16_t sum;
+ bool b_change = false;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (tc_num > I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i))
+ sum++;
+ }
+ if (sum != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ sum);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < tc_num; i++) {
+ if (!bw_weight[i]) {
+ PMD_DRV_LOG(ERR,
+ "The weight should be 1 at least.");
+ return -EINVAL;
+ }
+ sum += bw_weight[i];
+ }
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ /**
+ * Create the configuration for all the TCs.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ if (bw_weight[j] !=
+ vsi->bw_info.bw_ets_share_credits[i])
+ b_change = true;
+
+ tc_bw.tc_bw_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ /* No change. */
+ if (!b_change) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC allocated bandwidth."
+ " Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC bandwidth weight, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ j = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j];
+ j++;
+ }
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port, uint16_t vf_id,
+ uint8_t tc_no, uint32_t bw)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_vsi_ets_sla_bw_data tc_bw;
+ int ret = 0;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ if (vf_id >= pf->vf_num || !pf->vfs) {
+ PMD_DRV_LOG(ERR, "Invalid VF ID.");
+ return -EINVAL;
+ }
+
+ vsi = pf->vfs[vf_id].vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ if (bw > I40E_QOS_BW_MAX) {
+ PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.",
+ I40E_QOS_BW_MAX);
+ return -EINVAL;
+ }
+
+ if (bw % I40E_QOS_BW_GRANULARITY) {
+ PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.",
+ I40E_QOS_BW_GRANULARITY);
+ return -EINVAL;
+ }
+
+ bw /= I40E_QOS_BW_GRANULARITY;
+
+ if (tc_no >= I40E_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TC No. should be less than %d.",
+ I40E_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ if (!(vsi->enabled_tc & BIT_ULL(tc_no))) {
+ PMD_DRV_LOG(ERR, "VF %d TC %d isn't enabled.",
+ vf_id, tc_no);
+ return -EINVAL;
+ }
+
+ /* No change. */
+ if (bw == vsi->bw_info.bw_ets_credits[tc_no]) {
+ PMD_DRV_LOG(INFO,
+ "No change for TC max bandwidth. Nothing to do.");
+ return 0;
+ }
+
+ /**
+ * VF bandwidth limitation and TC bandwidth limitation cannot be
+ * enabled in parallel, disable VF bandwidth limitation if it's
+ * enabled.
+ * If bw is 0, it means bandwidth limitation is disabled, so there is
+ * no need to care about the VF bandwidth limitation configuration.
+ */
+ if (bw && vsi->bw_info.bw_limit) {
+ ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, 0, 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to disable VF(%d)"
+ " bandwidth limitation, err(%d).",
+ vf_id, ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "VF max bandwidth is disabled according"
+ " to TC max bandwidth setting.");
+ }
+
+ /**
+ * Get all the TCs' info to create a whole picture,
+ * because incremental changes aren't permitted.
+ */
+ memset(&tc_bw, 0, sizeof(tc_bw));
+ tc_bw.tc_valid_bits = vsi->enabled_tc;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (vsi->enabled_tc & BIT_ULL(i)) {
+ tc_bw.tc_bw_credits[i] =
+ rte_cpu_to_le_16(
+ vsi->bw_info.bw_ets_credits[i]);
+ }
+ }
+ tc_bw.tc_bw_credits[tc_no] = rte_cpu_to_le_16((uint16_t)bw);
+
+ ret = i40e_aq_config_vsi_ets_sla_bw_limit(hw, vsi->seid, &tc_bw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set VF %d TC %d max bandwidth, err(%d).",
+ vf_id, tc_no, ret);
+ return -EINVAL;
+ }
+
+ /* Store the configuration. */
+ vsi->bw_info.bw_ets_credits[tc_no] = (uint16_t)bw;
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_pf *pf;
+ struct i40e_vsi *vsi;
+ struct i40e_veb *veb;
+ struct i40e_hw *hw;
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ int i;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+
+ vsi = pf->main_vsi;
+ if (!vsi) {
+ PMD_DRV_LOG(ERR, "Invalid VSI.");
+ return -EINVAL;
+ }
+
+ veb = vsi->veb;
+ if (!veb) {
+ PMD_DRV_LOG(ERR, "Invalid VEB.");
+ return -EINVAL;
+ }
+
+ if ((tc_map & veb->enabled_tc) != tc_map) {
+ PMD_DRV_LOG(ERR,
+ "TC bitmap isn't the subset of enabled TCs 0x%x.",
+ veb->enabled_tc);
+ return -EINVAL;
+ }
+
+ if (tc_map == veb->strict_prio_tc) {
+ PMD_DRV_LOG(INFO, "No change for TC bitmap. Nothing to do.");
+ return 0;
+ }
+
+ hw = I40E_VSI_TO_HW(vsi);
+
+ /* Disable DCBx if it's the first time to set strict priority. */
+ if (!veb->strict_prio_tc) {
+ ret = i40e_aq_stop_lldp(hw, true, NULL);
+ if (ret)
+ PMD_DRV_LOG(INFO,
+ "Failed to disable DCBx as it's already"
+ " disabled.");
+ else
+ PMD_DRV_LOG(INFO,
+ "DCBx is disabled according to strict"
+ " priority setting.");
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = veb->enabled_tc;
+ ets_data.seepage = I40E_AQ_ETS_SEEPAGE_EN_MASK;
+ ets_data.tc_strict_priority_flags = tc_map;
+ /* Get all TCs' bandwidth. */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (veb->enabled_tc & BIT_ULL(i)) {
+ /* For robustness, if bandwidth is 0, use 1 instead. */
+ if (veb->bw_info.bw_ets_share_credits[i])
+ ets_data.tc_bw_share_credits[i] =
+ veb->bw_info.bw_ets_share_credits[i];
+ else
+ ets_data.tc_bw_share_credits[i] =
+ I40E_QOS_BW_WEIGHT_MIN;
+ }
+ }
+
+ if (!veb->strict_prio_tc)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_enable_switching_comp_ets,
+ NULL);
+ else if (tc_map)
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_modify_switching_comp_ets,
+ NULL);
+ else
+ ret = i40e_aq_config_switch_comp_ets(
+ hw, veb->uplink_seid,
+ &ets_data, i40e_aqc_opc_disable_switching_comp_ets,
+ NULL);
+
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to set TCs' strict priority mode."
+ " err (%d)", ret);
+ return -EINVAL;
+ }
+
+ veb->strict_prio_tc = tc_map;
+
+ /* Enable DCBx again, if all the TCs' strict priority disabled. */
+ if (!tc_map) {
+ ret = i40e_aq_start_lldp(hw, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR,
+ "Failed to enable DCBx, err(%d).", ret);
+ return -EINVAL;
+ }
+
+ PMD_DRV_LOG(INFO,
+ "DCBx is enabled again according to strict"
+ " priority setting.");
+ }
+
+ return ret;
+}
+
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+
+static void
+i40e_generate_profile_info_sec(char *name, struct i40e_ddp_version *version,
+ uint32_t track_id, uint8_t *profile_info_sec,
+ bool add)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ struct i40e_profile_info *pinfo;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ memcpy(pinfo->name, name, I40E_DDP_NAME_SIZE);
+ memcpy(&pinfo->version, version, sizeof(struct i40e_ddp_version));
+ if (add)
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+}
+
+static enum i40e_status_code
+i40e_add_rm_profile_info(struct i40e_hw *hw, uint8_t *profile_info_sec)
+{
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_profile_section_header *sec;
+ uint32_t track_id;
+ uint32_t offset = 0;
+ uint32_t info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ track_id = ((struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset))->track_id;
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add/remove profile info: "
+ "offset %d, info %d",
+ offset, info);
+
+ return status;
+}
+
+/* Check if the profile info exists */
+static int
+i40e_check_profile_info(uint8_t port, uint8_t *profile_info_sec)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port];
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint8_t *buff;
+ struct rte_pmd_i40e_profile_list *p_list;
+ struct rte_pmd_i40e_profile_info *pinfo, *p;
+ uint32_t i;
+ int ret;
+
+ buff = rte_zmalloc("pinfo_list",
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0);
+ if (!buff) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return -1;
+ }
+
+ ret = i40e_aq_get_ddp_list(
+ hw, (void *)buff,
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4),
+ 0, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Failed to get profile info list.");
+ rte_free(buff);
+ return -1;
+ }
+ p_list = (struct rte_pmd_i40e_profile_list *)buff;
+ pinfo = (struct rte_pmd_i40e_profile_info *)(profile_info_sec +
+ sizeof(struct i40e_profile_section_header));
+ for (i = 0; i < p_list->p_count; i++) {
+ p = &p_list->p_info[i];
+ if ((pinfo->track_id == p->track_id) &&
+ !memcmp(&pinfo->version, &p->version,
+ sizeof(struct i40e_ddp_version)) &&
+ !memcmp(&pinfo->name, &p->name,
+ I40E_DDP_NAME_SIZE)) {
+ PMD_DRV_LOG(INFO, "Profile exists.");
+ rte_free(buff);
+ return 1;
+ }
+ }
+
+ rte_free(buff);
+ return 0;
+}
+
+int
+rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ struct i40e_package_header *pkg_hdr;
+ struct i40e_generic_seg_header *profile_seg_hdr;
+ struct i40e_generic_seg_header *metadata_seg_hdr;
+ uint32_t track_id;
+ uint8_t *profile_info_sec;
+ int is_exist;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) +
+ sizeof(uint32_t) * 2)) {
+ PMD_DRV_LOG(ERR, "Buff is invalid.");
+ return -EINVAL;
+ }
+
+ pkg_hdr = (struct i40e_package_header *)buff;
+
+ if (!pkg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to fill the package structure");
+ return -EINVAL;
+ }
+
+ if (pkg_hdr->segment_count < 2) {
+ PMD_DRV_LOG(ERR, "Segment_count should be 2 at least.");
+ return -EINVAL;
+ }
+
+ /* Find metadata segment */
+ metadata_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_METADATA,
+ pkg_hdr);
+ if (!metadata_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find metadata segment header");
+ return -EINVAL;
+ }
+ track_id = ((struct i40e_metadata_segment *)metadata_seg_hdr)->track_id;
+
+ /* Find profile segment */
+ profile_seg_hdr = i40e_find_segment_in_package(SEGMENT_TYPE_I40E,
+ pkg_hdr);
+ if (!profile_seg_hdr) {
+ PMD_DRV_LOG(ERR, "Failed to find profile segment header");
+ return -EINVAL;
+ }
+
+ profile_info_sec = rte_zmalloc(
+ "i40e_profile_info",
+ sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info),
+ 0);
+ if (!profile_info_sec) {
+ PMD_DRV_LOG(ERR, "Failed to allocate memory");
+ return -EINVAL;
+ }
+
+ if (op == RTE_PMD_I40E_PKG_OP_WR_ADD) {
+ /* Check if the profile exists */
+ i40e_generate_profile_info_sec(
+ ((struct i40e_profile_segment *)profile_seg_hdr)->name,
+ &((struct i40e_profile_segment *)profile_seg_hdr)->version,
+ track_id, profile_info_sec, 1);
+ is_exist = i40e_check_profile_info(port, profile_info_sec);
+ if (is_exist > 0) {
+ PMD_DRV_LOG(ERR, "Profile already exists.");
+ rte_free(profile_info_sec);
+ return 1;
+ } else if (is_exist < 0) {
+ PMD_DRV_LOG(ERR, "Failed to check profile.");
+ rte_free(profile_info_sec);
+ return -EINVAL;
+ }
+
+ /* Write profile to HW */
+ status = i40e_write_profile(
+ hw,
+ (struct i40e_profile_segment *)profile_seg_hdr,
+ track_id);
+ if (status) {
+ PMD_DRV_LOG(ERR, "Failed to write profile.");
+ rte_free(profile_info_sec);
+ return status;
+ }
+
+ /* Add profile info to info list */
+ status = i40e_add_rm_profile_info(hw, profile_info_sec);
+ if (status)
+ PMD_DRV_LOG(ERR, "Failed to add profile info.");
+ } else {
+ PMD_DRV_LOG(ERR, "Operation not supported.");
+ }
+
+ rte_free(profile_info_sec);
+ return status;
+}
+
+int
+rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_hw *hw;
+ enum i40e_status_code status = I40E_SUCCESS;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (size < (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4))
+ return -EINVAL;
+
+ hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ status = i40e_aq_get_ddp_list(hw, (void *)buff,
+ size, 0, NULL);
+
+ return status;
+}
+
+static int check_invalid_pkt_type(uint32_t pkt_type)
+{
+ uint32_t l2, l3, l4, tnl, il2, il3, il4;
+
+ l2 = pkt_type & RTE_PTYPE_L2_MASK;
+ l3 = pkt_type & RTE_PTYPE_L3_MASK;
+ l4 = pkt_type & RTE_PTYPE_L4_MASK;
+ tnl = pkt_type & RTE_PTYPE_TUNNEL_MASK;
+ il2 = pkt_type & RTE_PTYPE_INNER_L2_MASK;
+ il3 = pkt_type & RTE_PTYPE_INNER_L3_MASK;
+ il4 = pkt_type & RTE_PTYPE_INNER_L4_MASK;
+
+ if (l2 &&
+ l2 != RTE_PTYPE_L2_ETHER &&
+ l2 != RTE_PTYPE_L2_ETHER_TIMESYNC &&
+ l2 != RTE_PTYPE_L2_ETHER_ARP &&
+ l2 != RTE_PTYPE_L2_ETHER_LLDP &&
+ l2 != RTE_PTYPE_L2_ETHER_NSH &&
+ l2 != RTE_PTYPE_L2_ETHER_VLAN &&
+ l2 != RTE_PTYPE_L2_ETHER_QINQ)
+ return -1;
+
+ if (l3 &&
+ l3 != RTE_PTYPE_L3_IPV4 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6 &&
+ l3 != RTE_PTYPE_L3_IPV4_EXT_UNKNOWN &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT &&
+ l3 != RTE_PTYPE_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (l4 &&
+ l4 != RTE_PTYPE_L4_TCP &&
+ l4 != RTE_PTYPE_L4_UDP &&
+ l4 != RTE_PTYPE_L4_FRAG &&
+ l4 != RTE_PTYPE_L4_SCTP &&
+ l4 != RTE_PTYPE_L4_ICMP &&
+ l4 != RTE_PTYPE_L4_NONFRAG)
+ return -1;
+
+ if (tnl &&
+ tnl != RTE_PTYPE_TUNNEL_IP &&
+ tnl != RTE_PTYPE_TUNNEL_GRENAT &&
+ tnl != RTE_PTYPE_TUNNEL_VXLAN &&
+ tnl != RTE_PTYPE_TUNNEL_NVGRE &&
+ tnl != RTE_PTYPE_TUNNEL_GENEVE)
+ return -1;
+
+ if (il2 &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_VLAN &&
+ il2 != RTE_PTYPE_INNER_L2_ETHER_QINQ)
+ return -1;
+
+ if (il3 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6 &&
+ il3 != RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT &&
+ il3 != RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN)
+ return -1;
+
+ if (il4 &&
+ il4 != RTE_PTYPE_INNER_L4_TCP &&
+ il4 != RTE_PTYPE_INNER_L4_UDP &&
+ il4 != RTE_PTYPE_INNER_L4_FRAG &&
+ il4 != RTE_PTYPE_INNER_L4_SCTP &&
+ il4 != RTE_PTYPE_INNER_L4_ICMP &&
+ il4 != RTE_PTYPE_INNER_L4_NONFRAG)
+ return -1;
+
+ return 0;
+}
+
+static int check_invalid_ptype_mapping(
+ struct rte_pmd_i40e_ptype_mapping *mapping_table,
+ uint16_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ uint16_t ptype = mapping_table[i].hw_ptype;
+ uint32_t pkt_type = mapping_table[i].sw_ptype;
+
+ if (ptype >= I40E_MAX_PKT_TYPE)
+ return -1;
+
+ if (pkt_type == RTE_PTYPE_UNKNOWN)
+ continue;
+
+ if (pkt_type & RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK)
+ continue;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_i40e_ptype_mapping_update(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (count > I40E_MAX_PKT_TYPE)
+ return -EINVAL;
+
+ if (check_invalid_ptype_mapping(mapping_items, count))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ if (exclusive) {
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = RTE_PTYPE_UNKNOWN;
+ }
+
+ for (i = 0; i < count; i++)
+ ad->ptype_tbl[mapping_items[i].hw_ptype]
+ = mapping_items[i].sw_ptype;
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_reset(uint8_t port)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ i40e_set_default_ptype_table(dev);
+
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_get(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ int n = 0;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (n >= size)
+ break;
+ if (valid_only && ad->ptype_tbl[i] == RTE_PTYPE_UNKNOWN)
+ continue;
+ mapping_items[n].hw_ptype = i;
+ mapping_items[n].sw_ptype = ad->ptype_tbl[i];
+ n++;
+ }
+
+ *count = n;
+ return 0;
+}
+
+int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type)
+{
+ struct rte_eth_dev *dev;
+ struct i40e_adapter *ad;
+ uint16_t i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_i40e_supported(dev))
+ return -ENOTSUP;
+
+ if (!mask && check_invalid_pkt_type(target))
+ return -EINVAL;
+
+ if (check_invalid_pkt_type(pkt_type))
+ return -EINVAL;
+
+ ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++) {
+ if (mask) {
+ if ((target | ad->ptype_tbl[i]) == target &&
+ (target & ad->ptype_tbl[i]))
+ ad->ptype_tbl[i] = pkt_type;
+ } else {
+ if (ad->ptype_tbl[i] == target)
+ ad->ptype_tbl[i] = pkt_type;
+ }
+ }
+
+ return 0;
+}
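As a hedged illustration (not part of the patch), an application might drive the new VF management APIs roughly as follows; the VF index 0 and the 1000 Mbps cap are assumptions used only for the sketch:

/* Hypothetical usage sketch: configure VF 0 on an i40e port. */
#include "rte_pmd_i40e.h"

static int
configure_vf0(uint8_t port)
{
	int ret;

	/* Enable MAC anti-spoofing on VF 0. */
	ret = rte_pmd_i40e_set_vf_mac_anti_spoof(port, 0, 1);
	if (ret < 0)
		return ret;

	/* Cap VF 0 at 1000 Mbps; the value must be a multiple of 50 Mbps. */
	ret = rte_pmd_i40e_set_vf_max_bw(port, 0, 1000);
	if (ret < 0)
		return ret;

	/* Notify VF 0 of the current PF link status. */
	return rte_pmd_i40e_ping_vfs(port, 0);
}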
diff --git a/drivers/net/i40e/rte_pmd_i40e.h b/drivers/net/i40e/rte_pmd_i40e.h
new file mode 100644
index 00000000..1efb2c4b
--- /dev/null
+++ b/drivers/net/i40e/rte_pmd_i40e.h
@@ -0,0 +1,590 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PMD_I40E_H_
+#define _PMD_I40E_H_
+
+/**
+ * @file rte_pmd_i40e.h
+ *
+ * i40e PMD specific functions.
+ *
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ */
+
+#include <rte_ethdev.h>
+
+/**
+ * Response sent back to i40e driver from user app after callback
+ */
+enum rte_pmd_i40e_mb_event_rsp {
+ RTE_PMD_I40E_MB_EVENT_NOOP_ACK, /**< skip mbox request and ACK */
+ RTE_PMD_I40E_MB_EVENT_NOOP_NACK, /**< skip mbox request and NACK */
+ RTE_PMD_I40E_MB_EVENT_PROCEED, /**< proceed with mbox request */
+ RTE_PMD_I40E_MB_EVENT_MAX /**< max value of this enum */
+};
+
+/**
+ * Data sent to the user application when the callback is executed.
+ */
+struct rte_pmd_i40e_mb_event_param {
+ uint16_t vfid; /**< Virtual Function number */
+ uint16_t msg_type; /**< VF to PF message type, see i40e_virtchnl_ops */
+ uint16_t retval; /**< return value */
+ void *msg; /**< pointer to message */
+ uint16_t msglen; /**< length of the message */
+};
+
+/**
+ * Option of package processing.
+ */
+enum rte_pmd_i40e_package_op {
+ RTE_PMD_I40E_PKG_OP_UNDEFINED = 0,
+ RTE_PMD_I40E_PKG_OP_WR_ADD, /**< load package and add to info list */
+ RTE_PMD_I40E_PKG_OP_MAX = 32
+};
+
+#define RTE_PMD_I40E_DDP_NAME_SIZE 32
+
+/**
+ * Version for dynamic device personalization.
+ * Version in "major.minor.update.draft" format.
+ */
+struct rte_pmd_i40e_ddp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t update;
+ uint8_t draft;
+};
+
+/**
+ * Profile information in profile info list.
+ */
+struct rte_pmd_i40e_profile_info {
+ uint32_t track_id;
+ struct rte_pmd_i40e_ddp_version version;
+ uint8_t owner;
+ uint8_t reserved[7];
+ uint8_t name[RTE_PMD_I40E_DDP_NAME_SIZE];
+};
+
+/**
+ * Profile information list returned from HW.
+ */
+struct rte_pmd_i40e_profile_list {
+ uint32_t p_count;
+ struct rte_pmd_i40e_profile_info p_info[1];
+};
+
+/**
+ * The ptype mapping table only accepts RTE_PTYPE_XXX or "user defined" ptypes.
+ * A ptype with the MSB set will be regarded as a user defined ptype.
+ * The macro below helps to create a user defined ptype.
+ */
+#define RTE_PMD_I40E_PTYPE_USER_DEFINE_MASK 0x80000000
+
+struct rte_pmd_i40e_ptype_mapping {
+ uint16_t hw_ptype; /**< hardware defined packet type*/
+ uint32_t sw_ptype; /**< software defined packet type */
+};
+
+/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf);
+
+/**
+ * Enable/Disable VF MAC anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set MAC anti spoofing.
+ * @param on
+ * 1 - Enable VFs MAC anti spoofing.
+ * 0 - Disable VFs MAC anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN anti spoofing.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set VLAN anti spoofing.
+ * @param on
+ * 1 - Enable VFs VLAN anti spoofing.
+ * 0 - Disable VFs VLAN anti spoofing.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable TX loopback on the PF and all the VFs.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param on
+ * 1 - Enable TX loopback.
+ * 0 - Disable TX loopback.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_tx_loopback(uint8_t port,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF unicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF multicast promiscuous mode.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to set.
+ * @param on
+ * 1 - Enable.
+ * 0 - Disable.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Set the VF MAC address.
+ *
+ * The PF should set the MAC address before the VF is initialized; if the PF
+ * sets the MAC address after the VF is initialized, the new MAC address will
+ * not take effect until the VF reinitializes.
+ *
+ * This will remove all existing MAC filters.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF id.
+ * @param mac_addr
+ * VF MAC address.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* or *mac_addr* is invalid.
+ */
+int rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id,
+ struct ether_addr *mac_addr);
+
+/**
+ * Enable/Disable VF VLAN stripping for all queues in a pool
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * ID specifying VF.
+ * @param on
+ * 1 - Enable VF's vlan strip on RX queues.
+ * 0 - Disable VF's vlan strip on RX queues.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int
+rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN insertion
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param vlan_id
+ * 0 - Disable VF's vlan insert.
+ * n - Enable; n is inserted as the vlan id.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id,
+ uint16_t vlan_id);
+
+/**
+ * Enable/Disable VF broadcast mode
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable broadcast.
+ * 1 - Enable broadcast.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id,
+ uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN tagging
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param on
+ * 0 - Disable VF's vlan tag.
+ * n - Enable VF's vlan tag.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on);
+
+/**
+ * Enable/Disable VF VLAN filter
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vlan_id
+ * ID specifying VLAN
+ * @param vf_mask
+ * Mask to filter VFs.
+ * @param on
+ * 0 - Disable VF's VLAN filter.
+ * 1 - Enable VF's VLAN filter.
+ *
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id,
+ uint64_t vf_mask, uint8_t on);
+
+/**
+ * Get VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to get.
+ * @param stats
+ * A pointer to a structure of type *rte_eth_stats* to be filled with
+ * the values of device counters for the following set of statistics:
+ * - *ipackets* with the total of successfully received packets.
+ * - *opackets* with the total of successfully transmitted packets.
+ * - *ibytes* with the total of successfully received bytes.
+ * - *obytes* with the total of successfully transmitted bytes.
+ * - *ierrors* with the total of erroneous received packets.
+ * - *oerrors* with the total of failed transmitted packets.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_vf_stats(uint8_t port,
+ uint16_t vf_id,
+ struct rte_eth_stats *stats);
+
+/**
+ * Clear VF's statistics
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * VF on which to clear the statistics.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_reset_vf_stats(uint8_t port,
+ uint16_t vf_id);
+
+/**
+ * Set VF's max bandwidth.
+ *
+ * Per VF bandwidth limitation and per TC bandwidth limitation cannot
+ * be enabled in parallel. If per TC bandwidth is enabled, this call
+ * fails; disable the per TC bandwidth limitation first.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param bw
+ * Bandwidth for this VF.
+ * The value should be an absolute bandwidth in Mbps.
+ * The bandwidth is an L2 bandwidth, counting the bytes of Ethernet packets;
+ * bytes added by the physical layer are not counted.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_max_bw(uint8_t port,
+ uint16_t vf_id,
+ uint32_t bw);
+
+/**
+ * Set all the TCs' bandwidth weight on a specific VF.
+ *
+ * The bw_weight is the percentage of the bandwidth occupied by the TC.
+ * It can be taken as the relative minimum bandwidth setting.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weight for all the TCs.
+ * The sum of the bw_weight values should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port,
+ uint16_t vf_id,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
+ * Set a specific TC's max bandwidth on a specific VF.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf_id
+ * ID specifying VF.
+ * @param tc_no
+ * Number specifying TC.
+ * @param bw
+ * Max bandwidth for this TC.
+ * The value should be an absolute bandwidth in Mbps.
+ * The bandwidth is an L2 bandwidth, counting the bytes of Ethernet packets;
+ * bytes added by the physical layer are not counted.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_vf_tc_max_bw(uint8_t port,
+ uint16_t vf_id,
+ uint8_t tc_no,
+ uint32_t bw);
+
+/**
+ * Set some TCs to strict priority mode on a physical port.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_map
+ * A bit map for the TCs.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_i40e_set_tc_strict_prio(uint8_t port, uint8_t tc_map);
+
+/**
+ * Load/Unload a ddp package
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param buff
+ * buffer of package.
+ * @param size
+ * size of buffer.
+ * @param op
+ * Operation of package processing
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (1) if profile exists.
+ */
+int rte_pmd_i40e_process_ddp_package(uint8_t port, uint8_t *buff,
+ uint32_t size,
+ enum rte_pmd_i40e_package_op op);
+
+/**
+ * rte_pmd_i40e_get_ddp_list - Get loaded profile list
+ * @param port
+ * port id
+ * @param buff
+ * buffer for response
+ * @param size
+ * buffer size
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_i40e_get_ddp_list(uint8_t port, uint8_t *buff, uint32_t size);
+
+/**
+ * Update hardware defined ptype to software defined packet type
+ * mapping table.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mapping_items
+ * The base address of the array of mapping items.
+ * @param count
+ * Number of mapping items.
+ * @param exclusive
+ * Flag selecting the ptype mapping update method:
+ * -(0) only overwrite the referred PTYPE mappings,
+ * keep the other PTYPE mappings unchanged.
+ * -(!0) overwrite the referred PTYPE mappings,
+ * set all other PTYPEs to RTE_PTYPE_UNKNOWN.
+ */
+int rte_pmd_i40e_ptype_mapping_update(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t count,
+ uint8_t exclusive);
+
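An illustrative, non-authoritative sketch of a single-entry update; the hw_ptype/sw_ptype field names of struct rte_pmd_i40e_ptype_mapping are assumed from the companion header additions not shown in this hunk:

#include <rte_mbuf_ptype.h>
#include <rte_pmd_i40e.h>

/* Map hardware ptype 38 on port 0 to an Ethernet/IPv4/UDP software
 * ptype, leaving every other mapping untouched (exclusive == 0).
 */
static int example_ptype_update(void)
{
	struct rte_pmd_i40e_ptype_mapping item = {
		.hw_ptype = 38,
		.sw_ptype = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			    RTE_PTYPE_L4_UDP,
	};

	return rte_pmd_i40e_ptype_mapping_update(0, &item, 1, 0);
}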
+/**
+ * Reset hardware defined ptype to software defined ptype
+ * mapping table to default.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ */
+int rte_pmd_i40e_ptype_mapping_reset(uint8_t port);
+
+/**
+ * Get hardware defined ptype to software defined ptype
+ * mapping items.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mapping_items
+ * The base address of the array to store the returned items.
+ * @param size
+ * The size of the input array.
+ * @param count
+ * Where to store the number of returned items.
+ * @param valid_only
+ * -(0) return the full mapping table.
+ * -(!0) only return mapping items whose packet_type != RTE_PTYPE_UNKNOWN.
+ */
+int rte_pmd_i40e_ptype_mapping_get(
+ uint8_t port,
+ struct rte_pmd_i40e_ptype_mapping *mapping_items,
+ uint16_t size,
+ uint16_t *count,
+ uint8_t valid_only);
+
+/**
+ * Replace a specific software defined ptype, or a group of them,
+ * with a new one.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param target
+ * The packet type to be replaced.
+ * @param mask
+ * -(0) target represents a specific software defined ptype.
+ * -(!0) target is a mask representing a group of software defined ptypes.
+ * @param pkt_type
+ * The new packet type to overwrite with.
+ */
+int rte_pmd_i40e_ptype_mapping_replace(uint8_t port,
+ uint32_t target,
+ uint8_t mask,
+ uint32_t pkt_type);
+
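Closing the group, a hedged sketch (not part of the patch) that reads back the valid mappings and then retags one exact software ptype; the array size of 64 is an arbitrary example:

#include <stdint.h>
#include <rte_mbuf_ptype.h>
#include <rte_pmd_i40e.h>

/* Fetch the valid mappings of port 0, then replace the exact software
 * ptype RTE_PTYPE_L2_ETHER with RTE_PTYPE_UNKNOWN (mask == 0 means the
 * target names one specific ptype rather than a group).
 */
static int example_ptype_get_and_replace(void)
{
	struct rte_pmd_i40e_ptype_mapping items[64];
	uint16_t count = 0;
	int ret;

	ret = rte_pmd_i40e_ptype_mapping_get(0, items, 64, &count, 1);
	if (ret != 0)
		return ret;
	return rte_pmd_i40e_ptype_mapping_replace(0, RTE_PTYPE_L2_ETHER,
						  0, RTE_PTYPE_UNKNOWN);
}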
+#endif /* _PMD_I40E_H_ */
diff --git a/drivers/net/i40e/rte_pmd_i40e_version.map b/drivers/net/i40e/rte_pmd_i40e_version.map
index ef353984..3b0e805d 100644
--- a/drivers/net/i40e/rte_pmd_i40e_version.map
+++ b/drivers/net/i40e/rte_pmd_i40e_version.map
@@ -2,3 +2,39 @@ DPDK_2.0 {
local: *;
};
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_i40e_get_vf_stats;
+ rte_pmd_i40e_ping_vfs;
+ rte_pmd_i40e_ptype_mapping_get;
+ rte_pmd_i40e_ptype_mapping_replace;
+ rte_pmd_i40e_ptype_mapping_reset;
+ rte_pmd_i40e_ptype_mapping_update;
+ rte_pmd_i40e_reset_vf_stats;
+ rte_pmd_i40e_set_tx_loopback;
+ rte_pmd_i40e_set_vf_broadcast;
+ rte_pmd_i40e_set_vf_mac_addr;
+ rte_pmd_i40e_set_vf_mac_anti_spoof;
+ rte_pmd_i40e_set_vf_multicast_promisc;
+ rte_pmd_i40e_set_vf_unicast_promisc;
+ rte_pmd_i40e_set_vf_vlan_anti_spoof;
+ rte_pmd_i40e_set_vf_vlan_filter;
+ rte_pmd_i40e_set_vf_vlan_insert;
+ rte_pmd_i40e_set_vf_vlan_stripq;
+ rte_pmd_i40e_set_vf_vlan_tag;
+
+} DPDK_2.0;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_i40e_set_tc_strict_prio;
+ rte_pmd_i40e_set_vf_max_bw;
+ rte_pmd_i40e_set_vf_tc_bw_alloc;
+ rte_pmd_i40e_set_vf_tc_max_bw;
+ rte_pmd_i40e_process_ddp_package;
+ rte_pmd_i40e_get_ddp_list;
+
+} DPDK_17.02;
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 94ddc7b8..5529d81c 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -76,6 +76,9 @@ endif
ifeq ($(shell test $(GCC_VERSION) -ge 50 && echo 1), 1)
CFLAGS_ixgbe_common.o += -Wno-logical-not-parentheses
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
endif
@@ -100,6 +103,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_x550.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_phy.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_api.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_hv_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82599.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_dcb_82598.c
@@ -108,6 +112,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_flow.c
ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c
else
@@ -118,13 +123,9 @@ ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
# install this header file
SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README
index 6b54c31e..a61617be 100644
--- a/drivers/net/ixgbe/base/README
+++ b/drivers/net/ixgbe/base/README
@@ -1,7 +1,7 @@
..
BSD LICENSE
- Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -34,7 +34,8 @@ Intel® IXGBE driver
===================
This directory contains source code of FreeBSD ixgbe driver of version
-cid-10g-shared-code.2016.08.15 released by ND. The sub-directory of base/
+cid-10g-shared-code.2017.03.29 released by the team which develops
+basic drivers for any ixgbe NIC. The sub-directory of base/
contains the original source package.
This driver is valid for the product(s) listed below
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c
index 724dcbbc..d64abb2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.c
+++ b/drivers/net/ixgbe/base/ixgbe_82598.c
@@ -1222,9 +1222,9 @@ STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h
index 0326e70b..20aab9fc 100644
--- a/drivers/net/ixgbe/base/ixgbe_82598.h
+++ b/drivers/net/ixgbe/base/ixgbe_82598.h
@@ -45,7 +45,7 @@ s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c
index 832242ee..d9d11a8e 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.c
+++ b/drivers/net/ixgbe/base/ixgbe_82599.c
@@ -2169,9 +2169,9 @@ s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
diff --git a/drivers/net/ixgbe/base/ixgbe_82599.h b/drivers/net/ixgbe/base/ixgbe_82599.h
index c034d3d9..d555dbce 100644
--- a/drivers/net/ixgbe/base/ixgbe_82599.h
+++ b/drivers/net/ixgbe/base/ixgbe_82599.h
@@ -57,7 +57,7 @@ s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked);
diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c
index 094ee526..4117fb01 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.c
+++ b/drivers/net/ixgbe/base/ixgbe_api.c
@@ -205,6 +205,7 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
hw->mac.type = ixgbe_mac_X550EM_x;
hw->mvals = ixgbe_mvals_X550EM_x;
break;
@@ -1147,12 +1148,15 @@ s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* @min: driver minor number to be sent to firmware
* @build: driver build number to be sent to firmware
* @ver: driver version number to be sent to firmware
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
**/
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver)
+ u8 ver, u16 len, char *driver_ver)
{
return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
- build, ver), IXGBE_NOT_IMPLEMENTED);
+ build, ver, len, driver_ver),
+ IXGBE_NOT_IMPLEMENTED);
}
@@ -1575,7 +1579,7 @@ s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
{
return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
(hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h
index 24c4ae8d..2f532aa8 100644
--- a/drivers/net/ixgbe/base/ixgbe_api.h
+++ b/drivers/net/ixgbe/base/ixgbe_api.h
@@ -133,7 +133,7 @@ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
- u8 ver);
+ u8 ver, u16 len, char *driver_ver);
s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw);
void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
@@ -143,7 +143,7 @@ s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
-u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c
index cca19efc..4dabb434 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.c
+++ b/drivers/net/ixgbe/base/ixgbe_common.c
@@ -113,6 +113,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.led_off = ixgbe_led_off_generic;
mac->ops.blink_led_start = ixgbe_blink_led_start_generic;
mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic;
+ mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic;
/* RAR, Multicast, VLAN */
mac->ops.set_rar = ixgbe_set_rar_generic;
@@ -188,7 +189,10 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
break;
case ixgbe_media_type_backplane:
- supported = true;
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI)
+ supported = false;
+ else
+ supported = true;
break;
case ixgbe_media_type_copper:
/* only some copper devices support flow control autoneg */
@@ -409,8 +413,10 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
/* Setup flow control */
ret_val = ixgbe_setup_fc(hw);
- if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED)
+ if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) {
+ DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val);
return ret_val;
+ }
/* Cache bit indicating need for crosstalk fix */
switch (hw->mac.type) {
@@ -492,11 +498,17 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
/* Reset the hardware */
status = hw->mac.ops.reset_hw(hw);
- if (status == IXGBE_SUCCESS) {
+ if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) {
/* Start the HW */
status = hw->mac.ops.start_hw(hw);
}
+ /* Initialize the LED link active for LED blink support */
+ hw->mac.ops.init_led_link_act(hw);
+
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status);
+
return status;
}
@@ -1136,6 +1148,47 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_led_link_act_generic - Store the LED index link/activity.
+ * @hw: pointer to hardware structure
+ *
+ * Store the index for the link active LED. This will be used to support
+ * blinking the LED.
+ **/
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 led_reg, led_mode;
+ u8 i;
+
+ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+ /* Get LED link active from the LEDCTL register */
+ for (i = 0; i < 4; i++) {
+ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i);
+
+ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) ==
+ IXGBE_LED_LINK_ACTIVE) {
+ mac->led_link_act = i;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ /*
+ * If LEDCTL register does not have the LED link active set, then use
+ * known MAC defaults.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_X550EM_x:
+ mac->led_link_act = 1;
+ break;
+ default:
+ mac->led_link_act = 2;
+ }
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_led_on_generic - Turns on the software controllable LEDs.
* @hw: pointer to hardware structure
* @index: led number to turn on
@@ -3764,7 +3817,8 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
done:
return IXGBE_SUCCESS;
@@ -4184,7 +4238,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type >= ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -4595,13 +4649,15 @@ rel_out:
* semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
**/
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 sub)
+ u8 build, u8 sub, u16 len,
+ const char *driver_ver)
{
struct ixgbe_hic_drv_info fw_cmd;
int i;
s32 ret_val = IXGBE_SUCCESS;
DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+ UNREFERENCED_2PARAMETER(len, driver_ver);
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h
index 66dd5659..903f34d5 100644
--- a/drivers/net/ixgbe/base/ixgbe_common.h
+++ b/drivers/net/ixgbe/base/ixgbe_common.h
@@ -72,6 +72,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
@@ -155,12 +156,14 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
int strategy);
void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
- u8 build, u8 ver);
+ u8 build, u8 ver, u16 len, const char *str);
u8 ixgbe_calculate_checksum(u8 *buffer, u32 length);
s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
u32 length, u32 timeout, bool return_data);
s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout);
-
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *);
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT]);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.c b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
new file mode 100644
index 00000000..47143a26
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.c
@@ -0,0 +1,240 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2015, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#include "ixgbe_vf.h"
+#include "ixgbe_hv_vf.h"
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ UNREFERENCED_2PARAMETER(hw, xcast_mode);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ UNREFERENCED_3PARAMETER(hw, index, addr);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ UNREFERENCED_PARAMETER(hw);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vlan, u32 vind)
+{
+ UNREFERENCED_5PARAMETER(hw, index, addr, vlan, vind);
+
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+}
+
+/**
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ DELAY(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved for pre-x550 devices */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ UNREFERENCED_1PARAMETER(hw);
+
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* Set defaults for VF then override applicable Hyper-V
+ * specific functions
+ */
+ ixgbe_init_ops_vf(hw);
+
+ hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf;
+ hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf;
+ hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf;
+ hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf;
+ hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode;
+ hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf;
+ hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf;
+
+ return IXGBE_SUCCESS;
+}
diff --git a/drivers/net/ixgbe/base/ixgbe_hv_vf.h b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
new file mode 100644
index 00000000..9119f29f
--- /dev/null
+++ b/drivers/net/ixgbe/base/ixgbe_hv_vf.h
@@ -0,0 +1,41 @@
+/*******************************************************************************
+
+Copyright (c) 2001-2016, Intel Corporation
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+***************************************************************************/
+
+#ifndef _IXGBE_HV_VF_H_
+#define _IXGBE_HV_VF_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw);
+
+#endif /* _IXGBE_HV_VF_H_ */
diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h
index 7556a818..bde50a51 100644
--- a/drivers/net/ixgbe/base/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/base/ixgbe_mbx.h
@@ -114,6 +114,14 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */
#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c
+/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */
+enum ixgbevf_xcast_modes {
+ IXGBEVF_XCAST_MODE_NONE = 0,
+ IXGBEVF_XCAST_MODE_MULTI,
+ IXGBEVF_XCAST_MODE_ALLMULTI,
+ IXGBEVF_XCAST_MODE_PROMISC,
+};
+
/* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h
index 77f0af51..4aab278d 100644
--- a/drivers/net/ixgbe/base/ixgbe_osdep.h
+++ b/drivers/net/ixgbe/base/ixgbe_osdep.h
@@ -44,6 +44,7 @@
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_byteorder.h>
+#include <rte_io.h>
#include "../ixgbe_logs.h"
#include "../ixgbe_bypass_defines.h"
@@ -81,6 +82,7 @@
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
/* Shared code error reporting */
enum {
@@ -95,8 +97,9 @@ enum {
#define STATIC static
#define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i)
#define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i)
+#define IXGBE_CPU_TO_LE16(_i) rte_cpu_to_le_16(_i)
#define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i)
-#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
+#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i)
#define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i)
#define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i)
#define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i)
@@ -121,16 +124,18 @@ typedef int bool;
#define prefetch(x) rte_prefetch0(x)
-#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define IXGBE_PCI_REG(reg) rte_read32(reg)
static inline uint32_t ixgbe_read_addr(volatile void* addr)
{
return rte_le_to_cpu_32(IXGBE_PCI_REG(addr));
}
-#define IXGBE_PCI_REG_WRITE(reg, value) do { \
- IXGBE_PCI_REG((reg)) = (rte_cpu_to_le_32(value)); \
-} while(0)
+#define IXGBE_PCI_REG_WRITE(reg, value) \
+ rte_write32((rte_cpu_to_le_32(value)), reg)
+
+#define IXGBE_PCI_REG_WRITE_RELAXED(reg, value) \
+ rte_write32_relaxed((rte_cpu_to_le_32(value)), reg)
#define IXGBE_PCI_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c
index 43c55d74..62c3080a 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.c
+++ b/drivers/net/ixgbe/base/ixgbe_phy.c
@@ -113,7 +113,7 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
u16 *val, bool lock)
{
u32 swfw_mask = hw->phy.phy_semaphore_mask;
- int max_retry = 10;
+ int max_retry = 3;
int retry = 0;
u8 csum_byte;
u8 high_bits;
@@ -121,8 +121,6 @@ s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg,
u8 reg_high;
u8 csum;
- if (hw->mac.type >= ixgbe_mac_X550)
- max_retry = 3;
reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */
csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF);
csum = ~csum;
@@ -293,8 +291,11 @@ static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr)
{
u16 ext_ability = 0;
- if (!ixgbe_validate_phy_addr(hw, phy_addr))
+ if (!ixgbe_validate_phy_addr(hw, phy_addr)) {
+ DEBUGOUT1("Unable to validate PHY address 0x%04X\n",
+ phy_addr);
return false;
+ }
if (ixgbe_get_phy_id(hw))
return false;
@@ -413,6 +414,8 @@ bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
if (phy_id != 0xFFFF && phy_id != 0x0)
valid = true;
+ DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id);
+
return valid;
}
@@ -441,6 +444,9 @@ s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
+ DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n",
+ phy_id_high, phy_id_low);
+
return status;
}
@@ -459,7 +465,6 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID1:
case X550_PHY_ID2:
case X550_PHY_ID3:
case X540_PHY_ID:
@@ -477,7 +482,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
break;
case IXGBE_M88E1500_E_PHY_ID:
case IXGBE_M88E1543_E_PHY_ID:
- phy_type = ixgbe_phy_m88;
+ phy_type = ixgbe_phy_ext_1g_t;
break;
default:
phy_type = ixgbe_phy_unknown;
@@ -528,11 +533,30 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
*/
for (i = 0; i < 30; i++) {
msec_delay(100);
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
- IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
- if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
- usec_delay(2);
- break;
+ if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_TX_VENDOR_ALARMS_3,
+ IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) {
+ usec_delay(2);
+ break;
+ }
+ } else {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PHY_XS_CONTROL,
+ IXGBE_MDIO_PHY_XS_DEV_TYPE,
+ &ctrl);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+ usec_delay(2);
+ break;
+ }
}
}
@@ -554,7 +578,7 @@ out:
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
- u16 *phy_data)
+ u16 *phy_data)
{
u32 i, data, command;
@@ -576,12 +600,13 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
command = IXGBE_READ_REG(hw, IXGBE_MSCA);
if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
+ break;
}
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n");
+ DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -611,6 +636,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n");
+ DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n");
return IXGBE_ERR_PHY;
}
@@ -768,91 +794,63 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
- if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
- /* Set or unset auto-negotiation 10G advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 10G advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
- autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_10GB_FULL))
+ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
- hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- if (hw->mac.type == ixgbe_mac_X550) {
- if (speed & IXGBE_LINK_SPEED_5GB_FULL) {
- /* Set or unset auto-negotiation 5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_5GB_FULL)
- autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) {
- /* Set or unset auto-negotiation 2.5G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
-
- autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_2_5GB_FULL)
- autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ if (hw->mac.type == ixgbe_mac_X550) {
+ /* Set or unset auto-negotiation 5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE;
+
+ /* Set or unset auto-negotiation 2.5G advertisement */
+ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised &
+ IXGBE_LINK_SPEED_2_5GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_2_5GB_FULL))
+ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE;
}
- if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
- /* Set or unset auto-negotiation 1G advertisement */
- hw->phy.ops.read_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ /* Set or unset auto-negotiation 1G advertisement */
+ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) &&
+ (speed & IXGBE_LINK_SPEED_1GB_FULL))
+ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
- hw->phy.ops.write_reg(hw,
- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ /* Set or unset auto-negotiation 100M advertisement */
+ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_reg);
- if (speed & IXGBE_LINK_SPEED_100_FULL) {
- /* Set or unset auto-negotiation 100M advertisement */
- hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_reg);
+ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+ IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) &&
+ (speed & IXGBE_LINK_SPEED_100_FULL))
+ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
- autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
- IXGBE_MII_100BASE_T_ADVERTISE_HALF);
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
-
- hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_reg);
- }
+ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ autoneg_reg);
/* Blocked by MNG FW so don't reset PHY */
if (ixgbe_check_reset_blocked(hw))
@@ -1542,16 +1540,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("SFP+ module not supported\n");
@@ -1583,9 +1575,9 @@ err_read_i2c_eeprom:
*
* Determines physical layer capabilities of the current SFP.
*/
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u8 comp_codes_10g = 0;
u8 comp_codes_1g = 0;
@@ -1804,16 +1796,10 @@ s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
status = IXGBE_SUCCESS;
} else {
if (hw->allow_unsupported_sfp == true) {
- EWARN(hw, "WARNING: Intel (R) Network "
- "Connections are quality tested "
- "using Intel (R) Ethernet Optics."
- " Using untested modules is not "
- "supported and may cause unstable"
- " operation or damage to the "
- "module or the adapter. Intel "
- "Corporation is not responsible "
- "for any harm caused by using "
- "untested modules.\n", status);
+ EWARN(hw,
+ "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. "
+ "Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. "
+ "Intel Corporation is not responsible for any harm caused by using untested modules.\n");
status = IXGBE_SUCCESS;
} else {
DEBUGOUT("QSFP module not supported\n");
@@ -1838,7 +1824,6 @@ err_read_i2c_eeprom:
return IXGBE_ERR_SFP_NOT_PRESENT;
}
-
/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h
index da14abcd..cf8cadd9 100644
--- a/drivers/net/ixgbe/base/ixgbe_phy.h
+++ b/drivers/net/ixgbe/base/ixgbe_phy.h
@@ -92,8 +92,9 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5
-#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
-#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
+#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F
+#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */
+#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -154,73 +155,6 @@ POSSIBILITY OF SUCH DAMAGE.
/* SFP+ SFF-8472 Compliance */
#define IXGBE_SFF_SFF_8472_UNSUP 0x00
-/* More phy definitions */
-#define IXGBE_M88E1500_COPPER_CTRL 0 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_COPPER_CTRL_AN_EN (1u << 12)
-#define IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN (1u << 11)
-#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN (1u << 9)
-#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX (1u << 8)
-#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB (1u << 6)
-#define IXGBE_M88E1500_COPPER_STATUS 1 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_STATUS_AN_DONE (1u << 5)
-#define IXGBE_M88E1500_COPPER_AN 4 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_AN_AS_PAUSE (1u << 11)
-#define IXGBE_M88E1500_COPPER_AN_PAUSE (1u << 10)
-#define IXGBE_M88E1500_COPPER_AN_T4 (1u << 9)
-#define IXGBE_M88E1500_COPPER_AN_100TX_FD (1u << 8)
-#define IXGBE_M88E1500_COPPER_AN_100TX_HD (1u << 7)
-#define IXGBE_M88E1500_COPPER_AN_10TX_FD (1u << 6)
-#define IXGBE_M88E1500_COPPER_AN_10TX_HD (1u << 5)
-#define IXGBE_M88E1500_COPPER_AN_LP_ABILITY 5 /* Page 0 reg */
-#define IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE (1u << 11)
-#define IXGBE_M88E1500_COPPER_AN_LP_PAUSE (1u << 10)
-#define IXGBE_M88E1500_1000T_CTRL 9 /* Page 0 reg */
-/* 1=Configure PHY as Master 0=Configure PHY as Slave */
-#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE (1u << 11)
-#define IXGBE_M88E1500_1000T_CTRL_1G_FD (1u << 9)
-/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */
-#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE (1u << 12)
-#define IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX (1u << 9)
-#define IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX (1u << 8)
-#define IXGBE_M88E1500_1000T_STATUS 10 /* Page 0 reg */
-#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2
-#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3
-#define IXGBE_M88E1500_STATUS_LINK (1u << 2) /* Interface Link Bit */
-#define IXGBE_M88E1500_MAC_CTRL_1 16 /* Page 0 reg */
-#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */
-#define IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT 12
-#define IXGBE_M88E1500_MAC_CTRL_1_DWN_4X 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT 8
-#define IXGBE_M88E1500_MAC_CTRL_1_ED_TM 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT 5
-#define IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO 3u
-#define IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN (1u << 2)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS 17 /* Page 0 reg */
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_SHIFT 14
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_MASK 3u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_10 0u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_100 1u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_SPEED_1000 2u
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_DUPLEX (1u << 13)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_RESOLVED (1u << 11)
-#define IXGBE_M88E1500_PHY_SPEC_STATUS_LINK (1u << 10)
-#define IXGBE_M88E1500_PAGE_ADDR 22 /* All pages reg */
-#define IXGBE_M88E1500_FIBER_CTRL 0 /* Page 1 reg */
-#define IXGBE_M88E1500_FIBER_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB (1u << 13)
-#define IXGBE_M88E1500_FIBER_CTRL_AN_EN (1u << 12)
-#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN (1u << 11)
-#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL (1u << 8)
-#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB (1u << 6)
-#define IXGBE_M88E1500_MAC_SPEC_CTRL 16 /* Page 2 reg */
-#define IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN (1u << 3)
-#define IXGBE_M88E1500_EEE_CTRL_1 0 /* Page 18 reg */
-#define IXGBE_M88E1500_EEE_CTRL_1_MS (1u << 0) /* EEE Master/Slave */
-#define IXGBE_M88E1500_GEN_CTRL 20 /* Page 18 reg */
-#define IXGBE_M88E1500_GEN_CTRL_RESET (1u << 15)
-#define IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER 1u /* Mode bits 0-2 */
-
s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
@@ -258,7 +192,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
-s32 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h
index 4982e035..bda85589 100644
--- a/drivers/net/ixgbe/base/ixgbe_type.h
+++ b/drivers/net/ixgbe/base/ixgbe_type.h
@@ -146,6 +146,7 @@ POSSIBILITY OF SUCH DAMAGE.
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
+#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
@@ -1045,7 +1046,7 @@ struct ixgbe_dmac_config {
#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */
#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
-#define IXGBE_LSWFW 0x15014
+#define IXGBE_LSWFW 0x15F14
#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
#define IXGBE_BMCIPVAL 0x05060
#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001
@@ -1647,7 +1648,6 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
-#define X550_PHY_ID1 0x01540220
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
@@ -2622,6 +2622,7 @@ enum {
#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */
#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */
#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */
#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000
#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -3037,6 +3038,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_CEM_UNUSED_VER 0x0
#define FW_CEM_MAX_RETRIES 3
#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */
#define FW_READ_SHADOW_RAM_CMD 0x31
#define FW_READ_SHADOW_RAM_LEN 0x6
#define FW_WRITE_SHADOW_RAM_CMD 0x33
@@ -3062,6 +3064,59 @@ enum ixgbe_fdir_pballoc_type {
#define FW_INT_PHY_REQ_LEN 10
#define FW_INT_PHY_REQ_READ 0
#define FW_INT_PHY_REQ_WRITE 1
+#define FW_PHY_ACT_REQ_CMD 5
+#define FW_PHY_ACT_DATA_COUNT 4
+#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT)
+#define FW_PHY_ACT_INIT_PHY 1
+#define FW_PHY_ACT_SETUP_LINK 2
+#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0)
+#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1)
+#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2)
+#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3)
+#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4)
+#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5)
+#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6)
+#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7)
+#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8)
+#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9)
+#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT)
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u
+#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u
+#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18)
+#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19)
+#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20)
+#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22)
+#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0)
+#define FW_PHY_ACT_GET_LINK_INFO 3
+#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20)
+#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21)
+#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22)
+#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24)
+#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28)
+#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29)
+#define FW_PHY_ACT_FORCE_LINK_DOWN 4
+#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0)
+#define FW_PHY_ACT_PHY_SW_RESET 5
+#define FW_PHY_ACT_PHY_HW_RESET 6
+#define FW_PHY_ACT_GET_PHY_INFO 7
+#define FW_PHY_ACT_UD_2 0x1002
+#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6)
+#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5)
+#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4)
+#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3)
+#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2)
+#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1)
+#define FW_PHY_ACT_RETRIES 50
+#define FW_PHY_INFO_SPEED_MASK 0xFFFu
+#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
+#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
/* Host Interface Command Structures */
@@ -3111,6 +3166,16 @@ struct ixgbe_hic_drv_info {
u16 pad2; /* end spacing to ensure length is mult. of dword2 */
};
+struct ixgbe_hic_drv_info2 {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ char driver_string[FW_CEM_DRIVER_VERSION_SIZE];
+};
+
/* These need to be dword aligned */
struct ixgbe_hic_read_shadow_ram {
union ixgbe_hic_hdr2 hdr;
@@ -3159,6 +3224,19 @@ struct ixgbe_hic_internal_phy_resp {
__be32 read_data;
};
+struct ixgbe_hic_phy_activity_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 pad;
+ __le16 activity_id;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
+struct ixgbe_hic_phy_activity_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 data[FW_PHY_ACT_DATA_COUNT];
+};
+
#ifdef C99
#pragma pack(pop)
#else
@@ -3332,23 +3410,25 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
+typedef u64 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x4000
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000
+#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
+#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
@@ -3567,7 +3647,9 @@ enum ixgbe_phy_type {
ixgbe_phy_aq,
ixgbe_phy_x550em_kr,
ixgbe_phy_x550em_kx4,
+ ixgbe_phy_x550em_xfi,
ixgbe_phy_x550em_ext_t,
+ ixgbe_phy_ext_1g_t,
ixgbe_phy_cu_unknown,
ixgbe_phy_qt,
ixgbe_phy_xaui,
@@ -3586,7 +3668,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
ixgbe_phy_sgmii,
- ixgbe_phy_m88,
+ ixgbe_phy_fw,
ixgbe_phy_generic
};
@@ -3643,14 +3725,6 @@ enum ixgbe_fc_mode {
ixgbe_fc_default
};
-/* Master/slave control */
-enum ixgbe_ms_type {
- ixgbe_ms_hw_default = 0,
- ixgbe_ms_force_master,
- ixgbe_ms_force_slave,
- ixgbe_ms_auto
-};
-
/* Smart Speed Settings */
#define IXGBE_SMARTSPEED_MAX_RETRIES 3
enum ixgbe_smart_speed {
@@ -3833,7 +3907,7 @@ struct ixgbe_mac_operations {
s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
void (*enable_relaxed_ordering)(struct ixgbe_hw *);
enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
- u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ u64 (*get_supported_physical_layer)(struct ixgbe_hw *);
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
@@ -3875,6 +3949,7 @@ struct ixgbe_mac_operations {
s32 (*led_off)(struct ixgbe_hw *, u32);
s32 (*blink_led_start)(struct ixgbe_hw *, u32);
s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+ s32 (*init_led_link_act)(struct ixgbe_hw *);
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
@@ -3907,7 +3982,8 @@ struct ixgbe_mac_operations {
void (*fc_autoneg)(struct ixgbe_hw *);
/* Manageability interface */
- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16,
+ const char *);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map);
@@ -4017,6 +4093,7 @@ struct ixgbe_mac_info {
struct ixgbe_dmac_config dmac_config;
bool set_lben;
u32 max_link_up_time;
+ u8 led_link_act;
};
struct ixgbe_phy_info {
@@ -4032,8 +4109,8 @@ struct ixgbe_phy_info {
bool reset_disable;
ixgbe_autoneg_advertised autoneg_advertised;
ixgbe_link_speed speeds_supported;
- enum ixgbe_ms_type ms_type;
- enum ixgbe_ms_type original_ms_type;
+ ixgbe_link_speed eee_speeds_supported;
+ ixgbe_link_speed eee_speeds_advertised;
enum ixgbe_smart_speed smart_speed;
bool smart_speed_active;
bool multispeed_fiber;
@@ -4257,8 +4334,8 @@ struct ixgbe_hw {
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20)
#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21)
-#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23)
-#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24)
+#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25)
+#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
(0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c
index e9c13f23..b513190a 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.c
+++ b/drivers/net/ixgbe/base/ixgbe_vf.c
@@ -432,6 +432,10 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
switch (hw->api_version) {
case ixgbe_mbox_api_12:
+ /* New modes were introduced in 1.3 version */
+ if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ /* Fall through */
case ixgbe_mbox_api_13:
break;
default:
@@ -613,13 +617,29 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
break;
case IXGBE_LINKS_SPEED_1G_82599:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved on older MACs */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
}
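
For context, a minimal standalone sketch of the speed decode this hunk adds: the LINKS register speed field selects a base speed, and on X550-class MACs a non-standard-speed bit reinterprets the 10G encoding as 2.5G and the 100M encoding as 5G. The constant names and bit values below are illustrative only, not the real register layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants only; the real LINKS register layout differs. */
#define LINKS_SPEED_MASK   0x3u
#define LINKS_SPEED_10G    0x3u
#define LINKS_SPEED_1G     0x2u
#define LINKS_SPEED_100M   0x1u
#define LINKS_SPEED_10M    0x0u
#define LINKS_NON_STD      0x4u   /* hypothetical "non-standard speed" flag */

enum link_speed { SPEED_UNKNOWN, SPEED_10M, SPEED_100M, SPEED_1G,
                  SPEED_2_5G, SPEED_5G, SPEED_10G };

/* Decode a link speed the way the patched VF code does: newer MACs
 * overload the 10G/100M encodings with a NON_STD bit for 2.5G/5G. */
static enum link_speed decode_speed(uint32_t links, int is_x550_or_later)
{
	switch (links & LINKS_SPEED_MASK) {
	case LINKS_SPEED_10G:
		if (is_x550_or_later && (links & LINKS_NON_STD))
			return SPEED_2_5G;
		return SPEED_10G;
	case LINKS_SPEED_1G:
		return SPEED_1G;
	case LINKS_SPEED_100M:
		if (is_x550_or_later && (links & LINKS_NON_STD))
			return SPEED_5G;
		return SPEED_100M;
	case LINKS_SPEED_10M:
		/* Reserved on older MACs */
		return is_x550_or_later ? SPEED_10M : SPEED_UNKNOWN;
	default:
		return SPEED_UNKNOWN;
	}
}

int main(void)
{
	printf("%d\n", decode_speed(LINKS_SPEED_10G | LINKS_NON_STD, 1)); /* 2.5G */
	return 0;
}
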
/* if the read failed it could just be a mailbox collision, best wait
diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h
index d288f31a..3efffe82 100644
--- a/drivers/net/ixgbe/base/ixgbe_vf.h
+++ b/drivers/net/ixgbe/base/ixgbe_vf.h
@@ -34,6 +34,8 @@ POSSIBILITY OF SUCH DAMAGE.
#ifndef _IXGBE_VF_H_
#define _IXGBE_VF_H_
+#include "ixgbe_type.h"
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -114,6 +116,7 @@ struct ixgbevf_hw_stats {
u64 saved_reset_vfmprc;
};
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c
index 6e778bc9..0e51813b 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.c
+++ b/drivers/net/ixgbe/base/ixgbe_x540.c
@@ -208,6 +208,7 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
{
s32 status;
u32 ctrl, i;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X540");
@@ -220,10 +221,17 @@ s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
ctrl = IXGBE_CTRL_RST;
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear indicating reset is complete */
for (i = 0; i < 10; i++) {
@@ -321,9 +329,9 @@ out:
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
@@ -491,7 +499,6 @@ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 length = 0;
u16 pointer = 0;
u16 word = 0;
- u16 checksum_last_word = IXGBE_EEPROM_CHECKSUM;
u16 ptr_start = IXGBE_PCIE_ANALOG_PTR;
/* Do not use hw->eeprom.ops.read because we do not want to take
@@ -501,14 +508,15 @@ s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
- /* Include 0x0-0x3F in the checksum */
- for (i = 0; i <= checksum_last_word; i++) {
+ /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the
+ * checksum itself
+ */
+ for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
if (ixgbe_read_eerd_generic(hw, i, &word)) {
DEBUGOUT("EEPROM read failed\n");
return IXGBE_ERR_EEPROM;
}
- if (i != IXGBE_EEPROM_CHECKSUM)
- checksum += word;
+ checksum += word;
}
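
A standalone sketch of what the new loop bounds mean: the checksum now covers every EEPROM word before the checksum word, and the checksum word itself is excluded by the loop bound rather than by an in-loop test. The word reader and offset below are placeholders, not the real ixgbe helpers.

#include <stdint.h>
#include <stddef.h>

#define EEPROM_CHECKSUM_WORD 0x3F  /* illustrative offset of the checksum word */

/* read_word() stands in for the EEPROM word read; returns 0 on success. */
typedef int (*read_word_fn)(uint16_t offset, uint16_t *word);

/* Sum words 0x0 .. EEPROM_CHECKSUM_WORD-1; the checksum word is excluded
 * by the loop bound, matching the patched calc_eeprom_checksum logic. */
static int sum_eeprom_words(read_word_fn read_word, uint16_t *checksum)
{
	uint16_t sum = 0, word, i;

	for (i = 0; i < EEPROM_CHECKSUM_WORD; i++) {
		if (read_word(i, &word))
			return -1;      /* read failure */
		sum = (uint16_t)(sum + word);
	}
	*checksum = sum;
	return 0;
}
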
/* Include all data from pointers 0x3, 0x6-0xE. This excludes the
@@ -775,8 +783,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
/* SW NVM semaphore bit is used for access to all
* SW_FW_SYNC bits (not just NVM)
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (!(swfw_sync & (fwmask | swmask | hwmask))) {
@@ -798,6 +808,7 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
if (swmask == IXGBE_GSSR_SW_MNG_SM) {
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"Failed to get SW only semaphore");
+ DEBUGOUT("Failed to get SW only semaphore, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
@@ -806,8 +817,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
* of the requested resource(s) while ignoring the corresponding FW/HW
* bits in the SW_FW_SYNC register.
*/
- if (ixgbe_get_swfw_sync_semaphore(hw))
+ if (ixgbe_get_swfw_sync_semaphore(hw)) {
+ DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
+ }
swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw));
if (swfw_sync & (fwmask | hwmask)) {
swfw_sync |= swmask;
@@ -829,9 +842,11 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
rmask |= IXGBE_GSSR_I2C_MASK;
ixgbe_release_swfw_sync_X540(hw, rmask);
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
ixgbe_release_swfw_sync_semaphore(hw);
+ DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n");
return IXGBE_ERR_SWFW_SYNC;
}
diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h
index e4baf6ff..8a19ae2e 100644
--- a/drivers/net/ixgbe/base/ixgbe_x540.h
+++ b/drivers/net/ixgbe/base/ixgbe_x540.h
@@ -43,7 +43,7 @@ s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed,
bool link_up_wait_to_complete);
s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c
index acb8140c..674dc144 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.c
+++ b/drivers/net/ixgbe/base/ixgbe_x550.c
@@ -62,7 +62,7 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.dmac_config = ixgbe_dmac_config_X550;
mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550;
mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550;
- mac->ops.setup_eee = ixgbe_setup_eee_X550;
+ mac->ops.setup_eee = NULL;
mac->ops.set_source_address_pruning =
ixgbe_set_source_address_pruning_X550;
mac->ops.set_ethertype_anti_spoofing =
@@ -83,6 +83,8 @@ s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw)
mac->ops.mdd_event = ixgbe_mdd_event_X550;
mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550;
mac->ops.disable_rx = ixgbe_disable_rx_x550;
+ /* Manageability interface */
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550;
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_10G_T:
@@ -448,23 +450,161 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->phy.type = ixgbe_phy_x550em_xfi;
+ break;
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_A_10G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
return ixgbe_identify_phy_generic(hw);
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM;
+ break;
default:
break;
}
return IXGBE_SUCCESS;
}
+/**
+ * ixgbe_fw_phy_activity - Perform an activity on a PHY
+ * @hw: pointer to hardware structure
+ * @activity: activity to perform
+ * @data: Pointer to 4 32-bit words of data
+ */
+s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity,
+ u32 (*data)[FW_PHY_ACT_DATA_COUNT])
+{
+ union {
+ struct ixgbe_hic_phy_activity_req cmd;
+ struct ixgbe_hic_phy_activity_resp rsp;
+ } hic;
+ u16 retries = FW_PHY_ACT_RETRIES;
+ s32 rc;
+ u16 i;
+
+ do {
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity);
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]);
+
+ rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
+ sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (rc != IXGBE_SUCCESS)
+ return rc;
+ if (hic.rsp.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS) {
+ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i)
+ (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]);
+ return IXGBE_SUCCESS;
+ }
+ usec_delay(20);
+ --retries;
+ } while (retries > 0);
+
+ return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+}
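
The activity helper above follows a common issue/check/retry shape: build the request, submit it over the host interface, return the response data on success, and back off and retry a bounded number of times on a transient failure status. Below is a minimal self-contained sketch of that pattern; send_cmd() and sleep_us() are stand-ins, not the real DPDK/ixgbe calls.

#include <stdint.h>
#include <string.h>

#define MAX_RETRIES   50
#define RESP_OK       1
#define ERR_TIMEOUT  -2

struct cmd  { uint16_t id; uint32_t data[4]; };
struct resp { int status;  uint32_t data[4]; };

/* Tiny stand-ins so the sketch is self-contained; a real driver would
 * talk to the device mailbox/host interface here. */
static int send_cmd(const struct cmd *c, struct resp *r)
{
	r->status = RESP_OK;
	memcpy(r->data, c->data, sizeof(r->data));
	return 0;
}
static void sleep_us(unsigned int us) { (void)us; }

/* Issue a command, copy the response data out on success, and retry a
 * bounded number of times when the response reports a transient failure. */
static int do_activity(uint16_t id, uint32_t data[4])
{
	struct cmd c;
	struct resp r;
	int i, rc, retries = MAX_RETRIES;

	do {
		memset(&c, 0, sizeof(c));
		c.id = id;
		for (i = 0; i < 4; i++)
			c.data[i] = data[i];

		rc = send_cmd(&c, &r);
		if (rc)
			return rc;              /* hard transport error */
		if (r.status == RESP_OK) {
			for (i = 0; i < 4; i++)
				data[i] = r.data[i];
			return 0;
		}
		sleep_us(20);                   /* back off before retrying */
	} while (--retries > 0);

	return ERR_TIMEOUT;
}
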
+
+static const struct {
+ u16 fw_speed;
+ ixgbe_link_speed phy_speed;
+} ixgbe_fw_map[] = {
+ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL },
+ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL },
+ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL },
+ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL },
+};
+
+/**
+ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_get_phy_id_fw(struct ixgbe_hw *hw)
+{
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ u16 phy_speeds;
+ u16 phy_id_lo;
+ s32 rc;
+ u16 i;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info);
+ if (rc)
+ return rc;
+
+ hw->phy.speeds_supported = 0;
+ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK;
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (phy_speeds & ixgbe_fw_map[i].fw_speed)
+ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed;
+ }
+ if (!hw->phy.autoneg_advertised)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK;
+ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK;
+ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK;
+ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK;
+ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+ return IXGBE_SUCCESS;
+}
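
A small self-contained sketch of the table-driven translation used above: each entry pairs a firmware speed bit with the driver's link-speed flag, and the supported-speed mask is built by OR-ing every matching entry. The bit values below are invented for the example; the real FW_PHY_ACT_LINK_SPEED_* and IXGBE_LINK_SPEED_* flags differ.

#include <stdint.h>
#include <stddef.h>

struct speed_map { uint16_t fw_bit; uint32_t drv_flag; };

static const struct speed_map map[] = {
	{ 0x0001, 0x0008 },   /* 10M   */
	{ 0x0002, 0x0010 },   /* 100M  */
	{ 0x0004, 0x0020 },   /* 1G    */
	{ 0x0008, 0x0400 },   /* 2.5G  */
	{ 0x0010, 0x0800 },   /* 5G    */
	{ 0x0020, 0x0080 },   /* 10G   */
};

/* Translate a firmware-reported speed bitmap into driver speed flags. */
static uint32_t fw_speeds_to_drv(uint16_t fw_bits)
{
	uint32_t flags = 0;
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (fw_bits & map[i].fw_bit)
			flags |= map[i].drv_flag;
	return flags;
}
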
+
+/**
+ * ixgbe_identify_phy_fw - Get PHY type based on firmware command
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw)
+{
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+
+ hw->phy.type = ixgbe_phy_fw;
+ hw->phy.ops.read_reg = NULL;
+ hw->phy.ops.write_reg = NULL;
+ return ixgbe_get_phy_id_fw(hw);
+}
+
+/**
+ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY
+ * @hw: pointer to hardware structure
+ *
+ * Returns error code
+ */
+s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+
+ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF;
+ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup);
+}
+
STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
@@ -601,18 +741,20 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
else
mac->ops.setup_fc = ixgbe_setup_fc_X550em;
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_X550em;
switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ mac->ops.setup_fc = NULL;
+ phy->ops.identify = ixgbe_identify_phy_fw;
+ phy->ops.set_phy_power = NULL;
+ phy->ops.get_firmware_version = NULL;
break;
default:
- mac->ops.setup_eee = NULL;
+ phy->ops.identify = ixgbe_identify_phy_x550em;
}
- /* PHY */
- phy->ops.init = ixgbe_init_phy_ops_X550em;
- phy->ops.identify = ixgbe_identify_phy_x550em;
if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper)
phy->ops.set_phy_power = NULL;
@@ -631,6 +773,92 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
+{
+ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 };
+ s32 rc;
+ u16 i;
+
+ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
+ return 0;
+
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
+ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_full:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_rx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ case ixgbe_fc_tx_pause:
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX <<
+ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) {
+ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed)
+ setup[0] |= ixgbe_fw_map[i].fw_speed;
+ }
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN;
+
+ if (hw->phy.eee_speeds_advertised)
+ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE;
+
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup);
+ if (rc)
+ return rc;
+ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN)
+ return IXGBE_ERR_OVERTEMP;
+ return IXGBE_SUCCESS;
+}
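
To make the setup-word construction concrete, here is a standalone sketch: the requested pause mode is shifted into its field, each advertised speed contributes its firmware bit, and the autoneg and EEE flags are OR-ed on top. The field positions below are invented for the example and do not match the real FW_PHY_ACT_SETUP_LINK_* encoding.

#include <stdint.h>

#define PAUSE_SHIFT      28
#define PAUSE_NONE        0u
#define PAUSE_TX          1u
#define PAUSE_RX          2u
#define PAUSE_RXTX        3u
#define FLAG_AUTONEG     (1u << 26)
#define FLAG_EEE         (1u << 25)

enum fc_mode { FC_NONE, FC_TX_PAUSE, FC_RX_PAUSE, FC_FULL };

/* Build the first data word of a hypothetical "setup link" command. */
static uint32_t build_setup_word(enum fc_mode fc, uint32_t speed_bits, int eee)
{
	uint32_t word = speed_bits;          /* advertised speed bits */

	switch (fc) {
	case FC_FULL:     word |= PAUSE_RXTX << PAUSE_SHIFT; break;
	case FC_RX_PAUSE: word |= PAUSE_RX   << PAUSE_SHIFT; break;
	case FC_TX_PAUSE: word |= PAUSE_TX   << PAUSE_SHIFT; break;
	default:          word |= PAUSE_NONE << PAUSE_SHIFT; break;
	}

	word |= FLAG_AUTONEG;                /* always request auto-negotiation */
	if (eee)
		word |= FLAG_EEE;            /* advertise EEE if any EEE speed is set */
	return word;
}
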
+
+/**
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Called at init time to set up flow control.
+ */
+static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw)
+{
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ return ixgbe_setup_fw_link(hw);
+}
+
+/**
+ * ixgbe_setup_eee_fw - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enable/disable EEE based on enable_eee flag.
+ * This function controls EEE for firmware-based PHY implementations.
+ */
+static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee)
+{
+ if (!!hw->phy.eee_speeds_advertised == enable_eee)
+ return IXGBE_SUCCESS;
+ if (enable_eee)
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ else
+ hw->phy.eee_speeds_advertised = 0;
+ return hw->phy.ops.setup_link(hw);
+}
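
The EEE toggle above is just a bit of state plus a relink. A compact standalone sketch of the same idea follows; setup_link() is a placeholder for the real link setup call.

#include <stdint.h>

struct phy_state {
	uint32_t eee_supported;   /* speeds that can use EEE */
	uint32_t eee_advertised;  /* speeds currently advertised with EEE */
};

/* Placeholder for the real link setup call. */
static int setup_link(struct phy_state *p) { (void)p; return 0; }

/* Enable or disable EEE by advertising either all supported EEE speeds
 * or none, then re-running link setup only when the state changes. */
static int set_eee(struct phy_state *p, int enable)
{
	if (!!p->eee_advertised == !!enable)
		return 0;                      /* nothing to do */
	p->eee_advertised = enable ? p->eee_supported : 0;
	return setup_link(p);
}
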
+
+/**
* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type
* @hw: pointer to hardware structure
*
@@ -671,10 +899,18 @@ s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
break;
}
- if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T) ||
- (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L)) {
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a;
- mac->ops.setup_fc = ixgbe_setup_fc_sgmii_x550em_a;
+ mac->ops.setup_fc = ixgbe_fc_autoneg_fw;
+ mac->ops.setup_eee = ixgbe_setup_eee_fw;
+ hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;
+ break;
+ default:
+ break;
}
return ret_val;
@@ -709,6 +945,7 @@ s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw)
ixgbe_write_i2c_combined_generic_unlocked;
link->addr = IXGBE_CS4227;
+
return ret_val;
}
@@ -876,159 +1113,6 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enable_eee_x550 - Enable EEE support
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw)
-{
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Advertise EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_eee_reg);
-
- autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_eee_reg);
- return IXGBE_SUCCESS;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX;
-
- /* Don't advertise FEC capability when EEE enabled. */
- link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- break;
- default:
- break;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_disable_eee_x550 - Disable EEE support
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw)
-{
- u16 autoneg_eee_reg;
- u32 link_reg;
- s32 status;
-
- if (hw->mac.type == ixgbe_mac_X550) {
- /* Disable advertised EEE capability */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_eee_reg);
-
- autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT |
- IXGBE_AUTO_NEG_1000BASE_EEE_ADVT |
- IXGBE_AUTO_NEG_100BASE_EEE_ADVT);
-
- hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- autoneg_eee_reg);
- return IXGBE_SUCCESS;
- }
-
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_A_KR:
- case IXGBE_DEV_ID_X550EM_A_KR_L:
- status = hw->mac.ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
-
- link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR |
- IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX);
-
- /* Advertise FEC capability when EEE is disabled. */
- link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC;
-
- status = hw->mac.ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg);
- if (status != IXGBE_SUCCESS)
- return status;
- break;
- default:
- break;
- }
-
- return IXGBE_SUCCESS;
-}
-
-/**
- * ixgbe_setup_eee_X550 - Enable/disable EEE support
- * @hw: pointer to the HW structure
- * @enable_eee: boolean flag to enable EEE
- *
- * Enable/disable EEE based on enable_eee flag.
- * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C
- * are modified.
- *
- **/
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee)
-{
- s32 status;
- u32 eeer;
-
- DEBUGFUNC("ixgbe_setup_eee_X550");
-
- eeer = IXGBE_READ_REG(hw, IXGBE_EEER);
- /* Enable or disable EEE per flag */
- if (enable_eee) {
- eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- /* Not supported on first revision of X550EM_x. */
- if ((hw->mac.type == ixgbe_mac_X550EM_x) &&
- !(IXGBE_FUSES0_REV_MASK &
- IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0))))
- return IXGBE_SUCCESS;
-
- status = ixgbe_enable_eee_x550(hw);
- if (status)
- return status;
- } else {
- eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN);
-
- status = ixgbe_disable_eee_x550(hw);
- if (status)
- return status;
- }
- IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer);
-
- return IXGBE_SUCCESS;
-}
-
-/**
* ixgbe_set_source_address_pruning_X550 - Enable/Disable source address pruning
* @hw: pointer to hardware structure
* @enable: enable or disable source address pruning
@@ -1227,13 +1311,20 @@ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
sizeof(token_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
- if (status)
+ if (status) {
+ DEBUGOUT1("Issuing host interface command failed with Status = %d\n",
+ status);
return status;
+ }
if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
return IXGBE_SUCCESS;
- if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) {
+ DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n",
+ token_cmd.hdr.cmd_or_resp.ret_status);
return IXGBE_ERR_FW_RESP_INVALID;
+ }
+ DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n");
return IXGBE_ERR_TOKEN_RETRY;
}
@@ -1492,6 +1583,7 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_X_XFI:
case IXGBE_DEV_ID_X550EM_A_KR:
case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
@@ -1722,11 +1814,11 @@ STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed,
}
/**
- * ixgbe_setup_sgmii_m88 - Set up link for sgmii with Marvell PHYs
+ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
- bool autoneg_wait)
+STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
{
struct ixgbe_mac_info *mac = &hw->mac;
u32 lval, sval, flx_val;
@@ -1776,7 +1868,7 @@ STATIC s32 ixgbe_setup_sgmii_m88(struct ixgbe_hw *hw, ixgbe_link_speed speed,
return rc;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK;
- flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G;
+ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN;
flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN;
flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN;
@@ -1826,7 +1918,9 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X550EM_a) {
if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T ||
hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) {
- mac->ops.setup_link = ixgbe_setup_sgmii_m88;
+ mac->ops.setup_link = ixgbe_setup_sgmii_fw;
+ mac->ops.check_link =
+ ixgbe_check_mac_link_generic;
} else {
mac->ops.setup_link =
ixgbe_setup_mac_link_t_X550em;
@@ -1859,6 +1953,12 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
DEBUGFUNC("ixgbe_get_link_capabilities_X550em");
+ if (hw->phy.type == ixgbe_phy_fw) {
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+ return 0;
+ }
+
/* SFP */
if (hw->phy.media_type == ixgbe_media_type_fiber) {
@@ -1882,11 +1982,7 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
*speed = IXGBE_LINK_SPEED_10GB_FULL;
} else {
switch (hw->phy.type) {
- case ixgbe_phy_m88:
- *speed = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_10_FULL;
- break;
+ case ixgbe_phy_ext_1g_t:
case ixgbe_phy_sgmii:
*speed = IXGBE_LINK_SPEED_1GB_FULL;
break;
@@ -2024,19 +2120,32 @@ STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw)
status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc);
/* Enable link status change alarm */
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- if (status != IXGBE_SUCCESS)
- return status;
+ /* Enable LASI interrupts on X552 devices so the driver is notified of
+ * the external PHY's link configuration and can configure the internal
+ * iXFI link accordingly, since iXFI does not support auto-negotiation.
+ * X553 devices do not need this: their KR interface to the external PHY
+ * auto-negotiates, so the check below skips enabling LASI interrupts
+ * on X553.
+ */
+ if (hw->mac.type != ixgbe_mac_X550EM_a) {
+ status = hw->phy.ops.read_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg);
- reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
+ if (status != IXGBE_SUCCESS)
+ return status;
- status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN;
- if (status != IXGBE_SUCCESS)
- return status;
+ status = hw->phy.ops.write_reg(hw,
+ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg);
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
/* Enable high temperature failure and global fault alarms */
status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK,
@@ -2150,262 +2259,47 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
}
/**
- * ixgbe_setup_m88 - setup m88 PHY
+ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs
* @hw: pointer to hardware structure
*/
-STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw)
{
- u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- u16 reg;
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
return IXGBE_SUCCESS;
- rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store);
if (rc)
return rc;
+ memset(store, 0, sizeof(store));
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_COPPER_CTRL_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_MAC_CTRL_1_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0,
- reg);
- }
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 2);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
- &reg);
- if (rc)
- goto out;
- if (reg & IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN) {
- reg &= ~IXGBE_M88E1500_MAC_SPEC_CTRL_POWER_DOWN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_SPEC_CTRL, 0,
- reg);
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
- 0);
- if (rc)
- goto out;
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- &reg);
- if (rc)
- goto out;
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- usec_delay(50);
- } else {
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0,
- 0);
- if (rc)
- goto out;
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
-
- if (!(reg & IXGBE_M88E1500_COPPER_CTRL_AN_EN)) {
- reg |= IXGBE_M88E1500_COPPER_CTRL_AN_EN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0,
- reg);
- }
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, &reg);
- if (rc)
- goto out;
- reg &= ~IXGBE_M88E1500_1000T_CTRL_HALF_DUPLEX;
- reg &= ~IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
- reg |= IXGBE_M88E1500_1000T_CTRL_FULL_DUPLEX;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_1000T_CTRL, 0, reg);
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, &reg);
- if (rc)
- goto out;
- reg &= ~IXGBE_M88E1500_COPPER_AN_T4;
- reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_FD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_100TX_HD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_FD;
- reg &= ~IXGBE_M88E1500_COPPER_AN_10TX_HD;
-
- /* Flow control auto negotiation configuration was moved from here to
- * the function ixgbe_setup_fc_sgmii_x550em_a()
- */
-
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
- reg |= IXGBE_M88E1500_COPPER_AN_100TX_FD;
- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
- reg |= IXGBE_M88E1500_COPPER_AN_10TX_FD;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_AN, 0, reg);
-
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- goto out;
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
-
-
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
-
-out:
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
-}
-
-/**
- * ixgbe_reset_phy_m88e1500 - Reset m88e1500 PHY
- * @hw: pointer to hardware structure
- *
- * The PHY token must be held when calling this function.
- */
-static s32 ixgbe_reset_phy_m88e1500(struct ixgbe_hw *hw)
-{
- u16 reg;
- s32 rc;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store);
if (rc)
return rc;
- rc = hw->phy.ops.read_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, &reg);
- if (rc)
- return rc;
-
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESET;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
-
- usec_delay(10);
-
- return rc;
+ return ixgbe_setup_fw_link(hw);
}
/**
- * ixgbe_reset_phy_m88e1543 - Reset m88e1543 PHY
+ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp
* @hw: pointer to hardware structure
- *
- * The PHY token must be held when calling this function.
*/
-static s32 ixgbe_reset_phy_m88e1543(struct ixgbe_hw *hw)
+static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw)
{
- return hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
-}
-
-/**
- * ixgbe_reset_phy_m88 - Reset m88 PHY
- * @hw: pointer to hardware structure
- */
-STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw)
-{
- u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
- u16 reg;
+ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 };
s32 rc;
- if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw))
- return IXGBE_SUCCESS;
-
- rc = hw->mac.ops.acquire_swfw_sync(hw, mask);
+ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store);
if (rc)
return rc;
- switch (hw->phy.id) {
- case IXGBE_M88E1500_E_PHY_ID:
- rc = ixgbe_reset_phy_m88e1500(hw);
- break;
- case IXGBE_M88E1543_E_PHY_ID:
- rc = ixgbe_reset_phy_m88e1543(hw);
- break;
- default:
- rc = IXGBE_ERR_PHY;
- break;
+ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) {
+ ixgbe_shutdown_fw_phy(hw);
+ return IXGBE_ERR_OVERTEMP;
}
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_GEN_CTRL_RESET |
- IXGBE_M88E1500_GEN_CTRL_MODE_SGMII_COPPER;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_GEN_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_FIBER_CTRL_RESET |
- IXGBE_M88E1500_FIBER_CTRL_AN_EN |
- IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL |
- IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_FIBER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- if (rc)
- goto out;
-
- reg = (IXGBE_M88E1500_MAC_CTRL_1_DWN_4X <<
- IXGBE_M88E1500_MAC_CTRL_1_DWN_SHIFT) |
- (IXGBE_M88E1500_MAC_CTRL_1_ED_TM <<
- IXGBE_M88E1500_MAC_CTRL_1_ED_SHIFT) |
- (IXGBE_M88E1500_MAC_CTRL_1_MDIX_AUTO <<
- IXGBE_M88E1500_MAC_CTRL_1_MDIX_SHIFT);
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_MAC_CTRL_1, 0, reg);
- if (rc)
- goto out;
-
- reg = IXGBE_M88E1500_COPPER_CTRL_RESET |
- IXGBE_M88E1500_COPPER_CTRL_AN_EN |
- IXGBE_M88E1500_COPPER_CTRL_RESTART_AN |
- IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX |
- IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB;
- rc = hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_COPPER_CTRL, 0, reg);
- if (rc)
- goto out;
-
- hw->mac.ops.release_swfw_sync(hw, mask);
-
- /* In case of first reset set advertised speeds to default value */
- if (!hw->phy.autoneg_advertised)
- hw->phy.autoneg_advertised = IXGBE_LINK_SPEED_1GB_FULL |
- IXGBE_LINK_SPEED_100_FULL |
- IXGBE_LINK_SPEED_10_FULL;
-
- return ixgbe_setup_m88(hw);
-
-out:
- hw->phy.ops.write_reg_mdi(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0);
- hw->mac.ops.release_swfw_sync(hw, mask);
- return rc;
+ return IXGBE_SUCCESS;
}
/**
@@ -2450,6 +2344,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_init_phy_ops_X550em");
+ hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
@@ -2463,6 +2360,7 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a;
hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a;
+ phy->ops.check_overtemp = ixgbe_check_overtemp_fw;
if (hw->bus.lan_id)
hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM;
else
@@ -2482,13 +2380,18 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
break;
+ case IXGBE_DEV_ID_X550EM_X_1G_T:
+ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22;
+ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22;
+ break;
default:
break;
}
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
- if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
return ret_val;
/* Setup function pointers based on detected hardware */
@@ -2508,6 +2411,16 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
break;
+ case ixgbe_phy_ext_1g_t:
+ /* link is managed by FW */
+ phy->ops.setup_link = NULL;
+ break;
+ case ixgbe_phy_x550em_xfi:
+ /* link is managed by HW */
+ phy->ops.setup_link = NULL;
+ phy->ops.read_reg = ixgbe_read_phy_reg_x550em;
+ phy->ops.write_reg = ixgbe_write_phy_reg_x550em;
+ break;
case ixgbe_phy_x550em_ext_t:
/* If internal link mode is XFI, then setup iXFI internal link,
* else setup KR now.
@@ -2527,9 +2440,9 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
case ixgbe_phy_sgmii:
phy->ops.setup_link = NULL;
break;
- case ixgbe_phy_m88:
- phy->ops.setup_link = ixgbe_setup_m88;
- phy->ops.reset = ixgbe_reset_phy_m88;
+ case ixgbe_phy_fw:
+ phy->ops.setup_link = ixgbe_setup_fw_link;
+ phy->ops.reset = ixgbe_reset_phy_fw;
break;
default:
break;
@@ -2549,8 +2462,6 @@ STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_X_10G_T:
case IXGBE_DEV_ID_X550EM_A_SGMII:
case IXGBE_DEV_ID_X550EM_A_SGMII_L:
- case IXGBE_DEV_ID_X550EM_A_1G_T:
- case IXGBE_DEV_ID_X550EM_A_1G_T_L:
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_QSFP:
@@ -2559,6 +2470,13 @@ STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
break;
+ case IXGBE_DEV_ID_X550EM_A_1G_T:
+ case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ /* Select fast MDIO clock speed for these devices */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 |= IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
default:
break;
}
@@ -2579,14 +2497,16 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
u32 ctrl = 0;
u32 i;
bool link_up = false;
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
DEBUGFUNC("ixgbe_reset_hw_X550em");
/* Call adapter stop to disable Tx/Rx and clear interrupts */
status = hw->mac.ops.stop_adapter(hw);
- if (status != IXGBE_SUCCESS)
+ if (status != IXGBE_SUCCESS) {
+ DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status);
return status;
-
+ }
/* flush pending Tx transactions */
ixgbe_clear_tx_pending(hw);
@@ -2595,14 +2515,23 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
/* PHY ops must be identified and initialized prior to reset */
status = hw->phy.ops.init(hw);
- if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ if (status)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+ DEBUGOUT("Returning from reset HW due to PHY init failure\n");
return status;
+ }
/* start the external PHY */
if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
status = ixgbe_init_ext_t_x550em(hw);
- if (status)
+ if (status) {
+ DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n",
+ status);
return status;
+ }
}
/* Setup SFP module if there is one present. */
@@ -2615,8 +2544,10 @@ s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
return status;
/* Reset PHY */
- if (!hw->phy.reset_disable && hw->phy.ops.reset)
- hw->phy.ops.reset(hw);
+ if (!hw->phy.reset_disable && hw->phy.ops.reset) {
+ if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP)
+ return IXGBE_ERR_OVERTEMP;
+ }
mac_reset_top:
/* Issue global reset to the MAC. Needs to be SW reset if link is up.
@@ -2631,9 +2562,17 @@ mac_reset_top:
ctrl = IXGBE_CTRL_RST;
}
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+
ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
/* Poll for reset bit to self-clear meaning reset is complete */
for (i = 0; i < 10; i++) {
@@ -2674,6 +2613,9 @@ mac_reset_top:
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Reset HW failed, STATUS = %d\n", status);
+
return status;
}
@@ -2723,14 +2665,16 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
/**
* ixgbe_setup_kr_x550em - Configure the KR PHY.
* @hw: pointer to hardware structure
- *
- * Configures the integrated KR PHY for X550EM_x.
**/
s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
- if (hw->mac.type != ixgbe_mac_X550EM_x)
+ /* leave link alone for 2.5G */
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
return IXGBE_SUCCESS;
+ if (ixgbe_check_reset_blocked(hw))
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -2762,53 +2706,18 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw,
if (ret_val != IXGBE_SUCCESS)
return ret_val;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
- /* Configure CS4227 LINE side to 10G SR. */
- reg_slice = IXGBE_CS4227_LINE_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = IXGBE_CS4227_SPEED_10G;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
+ /* Configure CS4227 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
+ (hw->bus.lan_id << 12);
+ if (setup_linear)
+ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
+ else
reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- /* Configure CS4227 for HOST connection rate then type. */
- reg_slice = IXGBE_CS4227_HOST_SPARE22_MSB +
- (hw->bus.lan_id << 12);
- reg_val = (speed & IXGBE_LINK_SPEED_10GB_FULL) ?
- IXGBE_CS4227_SPEED_10G : IXGBE_CS4227_SPEED_1G;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- reg_slice = IXGBE_CS4227_HOST_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
-
- /* Setup XFI internal link. */
- ret_val = ixgbe_setup_ixfi_x550em(hw, &speed);
- } else {
- /* Configure internal PHY for KR/KX. */
- ixgbe_setup_kr_speed_x550em(hw, speed);
-
- /* Configure CS4227 LINE side to proper mode. */
- reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB +
- (hw->bus.lan_id << 12);
- if (setup_linear)
- reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
- else
- reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
- ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
- reg_val);
- }
+ ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice,
+ reg_val);
return ret_val;
}
@@ -2922,8 +2831,8 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
return IXGBE_ERR_PHY_ADDR_INVALID;
}
- /* Get external PHY device id */
- ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ /* Get external PHY SKU id */
+ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU,
IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
if (ret_val != IXGBE_SUCCESS)
@@ -2932,7 +2841,7 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* When configuring quad port CS4223, the MAC instance is part
* of the slice offset.
*/
- if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ if (reg_phy_ext == IXGBE_CS4223_SKU_ID)
slice_offset = (hw->bus.lan_id +
(hw->bus.instance_id << 1)) << 12;
else
@@ -2940,12 +2849,26 @@ s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw,
/* Configure CS4227/CS4223 LINE side to proper mode. */
reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) |
+ (IXGBE_CS4227_EDC_MODE_SR << 1));
+
if (setup_linear)
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1;
else
reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1;
ret_val = hw->phy.ops.write_reg(hw, reg_slice,
IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext);
+
+ /* Flush previous write with a read */
+ ret_val = hw->phy.ops.read_reg(hw, reg_slice,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
}
return ret_val;
}
@@ -3033,6 +2956,10 @@ STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
s32 status;
u32 reg_val;
+ /* iXFI is only supported with X552 */
+ if (mac->type != ixgbe_mac_X550EM_x)
+ return IXGBE_ERR_LINK_SETUP;
+
/* Disable AN and force speed to 10G Serial. */
status = mac->ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
@@ -3129,7 +3056,8 @@ s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
/* If link is down, there is no setup necessary so return */
status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up);
if (status != IXGBE_SUCCESS)
@@ -3745,9 +3673,9 @@ s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
*
* Determines physical layer capabilities of the current configuration.
**/
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
{
- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
u16 ext_ability = 0;
DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em");
@@ -3756,6 +3684,21 @@ u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
switch (hw->phy.type) {
case ixgbe_phy_x550em_kr:
+ if (hw->mac.type == ixgbe_mac_X550EM_a) {
+ if (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ break;
+ } else if (hw->device_id ==
+ IXGBE_DEV_ID_X550EM_A_KR_L) {
+ physical_layer =
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ }
+ }
+ /* fall through */
+ case ixgbe_phy_x550em_xfi:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
IXGBE_PHYSICAL_LAYER_1000BASE_KX;
break;
@@ -3772,6 +3715,20 @@ u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_fw:
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+ break;
+ case ixgbe_phy_sgmii:
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
+ case ixgbe_phy_ext_1g_t:
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ break;
default:
break;
}
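
Since get_supported_physical_layer now returns a 64-bit mask, here is a standalone sketch of how supported speeds map onto such capability bits; the bit assignments are illustrative only and do not match the real IXGBE_PHYSICAL_LAYER_* values.

#include <stdint.h>

#define PHYS_10BASE_T     (1ULL << 0)
#define PHYS_100BASE_TX   (1ULL << 1)
#define PHYS_1000BASE_T   (1ULL << 2)
#define PHYS_10GBASE_T    (1ULL << 3)

#define SPEED_10M    0x01u
#define SPEED_100M   0x02u
#define SPEED_1G     0x04u
#define SPEED_10G    0x08u

/* Widened to a 64-bit mask so more than 32 capability bits fit,
 * mirroring the u32 -> u64 change in this patch. */
static uint64_t speeds_to_physical_layer(uint32_t speeds)
{
	uint64_t layer = 0;

	if (speeds & SPEED_10M)
		layer |= PHYS_10BASE_T;
	if (speeds & SPEED_100M)
		layer |= PHYS_100BASE_TX;
	if (speeds & SPEED_1G)
		layer |= PHYS_1000BASE_T;
	if (speeds & SPEED_10G)
		layer |= PHYS_10GBASE_T;
	return layer;
}
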
@@ -4071,6 +4028,9 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw)
/* This device does not fully support AN. */
hw->fc.disable_fc_autoneg = true;
break;
+ case IXGBE_DEV_ID_X550EM_X_XFI:
+ hw->fc.disable_fc_autoneg = true;
+ break;
default:
break;
}
@@ -4177,7 +4137,7 @@ void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw)
void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
- u16 reg, pcs_an_lp, pcs_an;
+ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
ixgbe_link_speed speed;
bool link_up;
@@ -4199,34 +4159,20 @@ void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw)
}
/* Check if auto-negotiation has completed */
- status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_STATUS,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
+ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info);
if (status != IXGBE_SUCCESS ||
- (reg & IXGBE_M88E1500_COPPER_STATUS_AN_DONE) == 0) {
+ !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) {
DEBUGOUT("Auto-Negotiation did not complete\n");
status = IXGBE_ERR_FC_NOT_NEGOTIATED;
goto out;
}
- /* Get the advertized flow control */
- status = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an);
- if (status != IXGBE_SUCCESS)
- goto out;
-
- /* Get link partner's flow control */
- status = hw->phy.ops.read_reg(hw,
- IXGBE_M88E1500_COPPER_AN_LP_ABILITY,
- IXGBE_MDIO_ZERO_DEV_TYPE, &pcs_an_lp);
- if (status != IXGBE_SUCCESS)
- goto out;
-
/* Negotiate the flow control */
- status = ixgbe_negotiate_fc(hw, (u32)pcs_an, (u32)pcs_an_lp,
- IXGBE_M88E1500_COPPER_AN_PAUSE,
- IXGBE_M88E1500_COPPER_AN_AS_PAUSE,
- IXGBE_M88E1500_COPPER_AN_LP_PAUSE,
- IXGBE_M88E1500_COPPER_AN_LP_AS_PAUSE);
+ status = ixgbe_negotiate_fc(hw, info[0], info[0],
+ FW_PHY_ACT_GET_LINK_INFO_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_FC_TX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX,
+ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX);
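
For reference, the pause/asymmetric-pause resolution behind ixgbe_negotiate_fc() follows the usual IEEE 802.3 Annex 28B table, roughly as in the sketch below; the bit names are placeholders for the local and link-partner ability bits, and the real driver additionally honours the mode the user requested.

enum fc_result { FC_NONE, FC_TX_PAUSE, FC_RX_PAUSE, FC_FULL };

/* Resolve flow control from local/partner PAUSE (sym) and ASM_DIR (asym)
 * advertisement bits, per the standard resolution table. */
static enum fc_result resolve_fc(int loc_sym, int loc_asm,
				 int lp_sym, int lp_asm)
{
	if (loc_sym && lp_sym)
		return FC_FULL;                 /* both sides support symmetric pause */
	if (loc_asm && lp_sym && lp_asm)
		return FC_TX_PAUSE;             /* we send pause frames only */
	if (loc_sym && loc_asm && lp_asm)
		return FC_RX_PAUSE;             /* we honour pause frames only */
	return FC_NONE;
}
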
out:
if (status == IXGBE_SUCCESS) {
@@ -4238,83 +4184,6 @@ out:
}
/**
- * ixgbe_setup_fc_sgmii_x550em_a - Set up flow control
- * @hw: pointer to hardware structure
- *
- * Called at init time to set up flow control.
- **/
-s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw)
-{
- u16 reg;
- s32 rc;
-
- /* Validate the requested mode */
- if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
- ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED,
- "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
- return IXGBE_ERR_INVALID_LINK_SETTINGS;
- }
-
- if (hw->fc.requested_mode == ixgbe_fc_default)
- hw->fc.requested_mode = ixgbe_fc_full;
-
- /* Read contents of the Auto-Negotiation register, page 0 reg 4 */
- rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
- if (rc)
- goto out;
-
- /* Disable all the settings related to Flow control Auto-negotiation */
- reg &= ~IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- reg &= ~IXGBE_M88E1500_COPPER_AN_PAUSE;
-
- /* Configure the Asymmetric and symmetric pause according to the user
- * requested mode.
- */
- switch (hw->fc.requested_mode) {
- case ixgbe_fc_full:
- reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- case ixgbe_fc_rx_pause:
- reg |= IXGBE_M88E1500_COPPER_AN_PAUSE;
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- case ixgbe_fc_tx_pause:
- reg |= IXGBE_M88E1500_COPPER_AN_AS_PAUSE;
- break;
- default:
- break;
- }
-
- /* Write back to the Auto-Negotiation register with newly configured
- * fields
- */
- hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_AN,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg);
-
- /* In this section of the code we restart Auto-negotiation */
-
- /* Read the CONTROL register, Page 0 reg 0 */
- rc = hw->phy.ops.read_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
- IXGBE_MDIO_ZERO_DEV_TYPE, &reg);
- if (rc)
- goto out;
-
- /* Set the bit to restart Auto-Neg. The bit to enable Auto-neg is ON
- * by default
- */
- reg |= IXGBE_M88E1500_COPPER_CTRL_RESTART_AN;
-
- /* write the new values to the register to restart Auto-Negotiation */
- hw->phy.ops.write_reg(hw, IXGBE_M88E1500_COPPER_CTRL,
- IXGBE_MDIO_ZERO_DEV_TYPE, reg);
-
-out:
- return rc;
-}
-
-/**
* ixgbe_setup_fc_backplane_x550em_a - Set up flow control
* @hw: pointer to hardware structure
*
@@ -4481,21 +4350,34 @@ STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
status = IXGBE_SUCCESS;
if (hmask)
status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
- if (status)
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
+ status);
return status;
+ }
if (!(mask & IXGBE_GSSR_TOKEN_SM))
return IXGBE_SUCCESS;
status = ixgbe_get_phy_token(hw);
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
+
if (status == IXGBE_SUCCESS)
return IXGBE_SUCCESS;
if (hmask)
ixgbe_release_swfw_sync_X540(hw, hmask);
- if (status != IXGBE_ERR_TOKEN_RETRY)
+
+ if (status != IXGBE_ERR_TOKEN_RETRY) {
+ DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
+ status);
return status;
+ }
}
+ DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
+ hw->phy.id);
return status;
}
@@ -4631,8 +4513,10 @@ s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw,
else
force_speed = IXGBE_LINK_SPEED_1GB_FULL;
- /* If internal link mode is XFI, then setup XFI internal link. */
- if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
+ /* If X552 and internal link mode is XFI, then setup XFI internal link.
+ */
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) {
status = ixgbe_setup_ixfi_x550em(hw, &force_speed);
if (status != IXGBE_SUCCESS)
@@ -4655,7 +4539,7 @@ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
u32 status;
- u16 autoneg_status;
+ u16 i, autoneg_status = 0;
if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper)
return IXGBE_ERR_CONFIG;
@@ -4668,21 +4552,18 @@ s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
return status;
/* MAC link is up, so check external PHY link.
- * Read this twice back to back to indicate current status.
+ * The X557 PHY link status latches low: a single read only detects a
+ * link drop, so back-to-back reads are needed to report the current
+ * link state.
*/
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
-
- if (status != IXGBE_SUCCESS)
- return status;
-
- status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
- IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
- &autoneg_status);
+ for (i = 0; i < 2; i++) {
+ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+ IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+ &autoneg_status);
- if (status != IXGBE_SUCCESS)
- return status;
+ if (status != IXGBE_SUCCESS)
+ return status;
+ }
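
A tiny standalone sketch of the back-to-back read pattern this hunk introduces: the first read clears the latched low indication, and only the second read reflects the current state. read_status() is a stand-in for the MDIO read, and the bit value is illustrative.

#include <stdint.h>

#define LINK_UP_BIT 0x0004u   /* illustrative latching-low link status bit */

/* Stand-in for the MDIO status read; returns 0 on success. */
static int read_status(uint16_t *status)
{
	*status = LINK_UP_BIT;
	return 0;
}

/* Latching-low status: read twice and trust only the second value. */
static int link_is_up(int *up)
{
	uint16_t status = 0;
	int i, rc;

	for (i = 0; i < 2; i++) {
		rc = read_status(&status);
		if (rc)
			return rc;
	}
	*up = !!(status & LINK_UP_BIT);
	return 0;
}
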
/* If external PHY link is not up, then indicate link not up */
if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS))
@@ -4729,7 +4610,8 @@ s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_on_generic(hw, led_idx);
}
/**
@@ -4753,5 +4635,67 @@ s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx)
ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data);
- return IXGBE_SUCCESS;
+ /* Some designs have the LEDs wired to the MAC */
+ return ixgbe_led_off_generic(hw, led_idx);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Sends driver version number to firmware through the manageability
+ * block. On success return IXGBE_SUCCESS
+ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub, u16 len, const char *driver_ver)
+{
+ struct ixgbe_hic_drv_info2 fw_cmd;
+ s32 ret_val = IXGBE_SUCCESS;
+ int i;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_x550");
+
+ if ((len == 0) || (driver_ver == NULL) ||
+ (len > sizeof(fw_cmd.driver_string)))
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.hdr.checksum = 0;
+ memcpy(fw_cmd.driver_string, driver_ver, len);
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ sizeof(fw_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (ret_val != IXGBE_SUCCESS)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = IXGBE_SUCCESS;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ return ret_val;
}
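
The driver-info command is checksummed before being handed to the host interface. As an assumption about ixgbe_calculate_checksum(), which is not shown in this diff, a byte-wise sum whose complement makes the whole buffer sum to zero would look like this standalone sketch:

#include <stdint.h>
#include <stddef.h>

/* Assumed behaviour: return the value that makes the byte sum of the
 * buffer (including this checksum byte, once stored) equal zero mod 256. */
static uint8_t calc_checksum(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = (uint8_t)(sum + buf[i]);
	return (uint8_t)(0 - sum);
}

/* Verification helper: a correctly checksummed buffer sums to zero. */
static int checksum_ok(const uint8_t *buf, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = (uint8_t)(sum + buf[i]);
	return sum == 0;
}
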
diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h
index cd4db29c..6d188741 100644
--- a/drivers/net/ixgbe/base/ixgbe_x550.h
+++ b/drivers/net/ixgbe/base/ixgbe_x550.h
@@ -57,8 +57,6 @@ s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset,
u16 *data);
s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
u16 data);
-s32 ixgbe_set_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
-s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee);
void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable,
unsigned int pool);
void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
@@ -67,6 +65,8 @@ s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 data);
s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u32 *data);
+s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 ver, u16 len, const char *str);
s32 ixgbe_get_phy_token(struct ixgbe_hw *);
s32 ixgbe_put_phy_token(struct ixgbe_hw *);
s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
@@ -88,7 +88,7 @@ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw);
s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw);
-u32 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw);
void ixgbe_disable_rx_x550(struct ixgbe_hw *hw);
s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed);
s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index bac36e0d..2083cded 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -56,10 +56,12 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
+#include <rte_hash_crc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -72,8 +74,6 @@
#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
-#include "rte_pmd_ixgbe.h"
-
/*
* High threshold controlling when to start sending XOFF frames. Must be at
* least 8 bytes less than receive packet buffer size. This value is in units
@@ -154,17 +154,16 @@
#define IXGBE_QDE_STRIP_TAG 0x00000004
#define IXGBE_VTEICR_MASK 0x07
-enum ixgbevf_xcast_modes {
- IXGBEVF_XCAST_MODE_NONE = 0,
- IXGBEVF_XCAST_MODE_MULTI,
- IXGBEVF_XCAST_MODE_ALLMULTI,
-};
-
#define IXGBE_EXVET_VET_EXT_SHIFT 16
#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static int ixgbe_dev_start(struct rte_eth_dev *dev);
static void ixgbe_dev_stop(struct rte_eth_dev *dev);
@@ -183,16 +182,27 @@ static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int size);
static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbe_dev_xstats_get_names_by_id(
+ __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
uint8_t is_rx);
+static int ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
+ size_t fw_size);
static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static const uint32_t *ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
@@ -231,18 +241,21 @@ static int ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
uint16_t reta_size);
static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
static int ixgbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
-static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
-static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *handle);
+static void ixgbe_dev_interrupt_handler(void *param);
static void ixgbe_dev_interrupt_delayed_handler(void *param);
-static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
+static bool is_device_supported(struct rte_eth_dev *dev,
+ struct rte_pci_driver *drv);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
@@ -276,12 +289,6 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
ether_addr * mac_addr, uint8_t on);
static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
-static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
-static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
@@ -297,18 +304,13 @@ static void ixgbe_configure_msix(struct rte_eth_dev *dev);
static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t tx_rate);
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk);
-static void ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
- struct ether_addr *mac_addr,
- uint32_t index, uint32_t pool);
+static int ixgbevf_add_mac_addr(struct rte_eth_dev *dev,
+ struct ether_addr *mac_addr,
+ uint32_t index, uint32_t pool);
static void ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
static void ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add);
static int ixgbe_syn_filter_get(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter);
static int ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
@@ -318,17 +320,11 @@ static int ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
static void ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter);
-static int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
- struct rte_eth_ntuple_filter *filter,
- bool add);
static int ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
static int ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *filter);
-static int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
- struct rte_eth_ethertype_filter *filter,
- bool add);
static int ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg);
@@ -371,8 +367,7 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev,
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
-static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void ixgbevf_dev_interrupt_handler(void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
@@ -389,6 +384,8 @@ static int ixgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
+static int ixgbe_filter_restore(struct rte_eth_dev *dev);
+static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
/*
* Define VF Stats MACRO for Non "cleared on read" register
@@ -517,6 +514,8 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
.nb_max = IXGBE_MAX_RING_DESC,
.nb_min = IXGBE_MIN_RING_DESC,
.nb_align = IXGBE_TXD_ALIGN,
+ .nb_seg_max = IXGBE_TX_MAX_SEG,
+ .nb_mtu_seg_max = IXGBE_TX_MAX_SEG,
};
static const struct eth_dev_ops ixgbe_eth_dev_ops = {
@@ -533,10 +532,13 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.link_update = ixgbe_dev_link_update,
.stats_get = ixgbe_dev_stats_get,
.xstats_get = ixgbe_dev_xstats_get,
+ .xstats_get_by_id = ixgbe_dev_xstats_get_by_id,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
.xstats_get_names = ixgbe_dev_xstats_get_names,
+ .xstats_get_names_by_id = ixgbe_dev_xstats_get_names_by_id,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
+ .fw_version_get = ixgbe_fw_version_get,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.mtu_set = ixgbe_dev_mtu_set,
@@ -554,6 +556,8 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_queue_count = ixgbe_dev_rx_queue_count,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.dev_led_on = ixgbe_dev_led_on,
@@ -568,12 +572,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
.uc_all_hash_table_set = ixgbe_uc_all_hash_table_set,
.mirror_rule_set = ixgbe_mirror_rule_set,
.mirror_rule_reset = ixgbe_mirror_rule_reset,
- .set_vf_rx_mode = ixgbe_set_pool_rx_mode,
- .set_vf_rx = ixgbe_set_pool_rx,
- .set_vf_tx = ixgbe_set_pool_tx,
- .set_vf_vlan_filter = ixgbe_set_pool_vlan_filter,
.set_queue_rate_limit = ixgbe_set_queue_rate_limit,
- .set_vf_rate_limit = ixgbe_set_vf_rate_limit,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
#ifdef RTE_NIC_BYPASS
@@ -637,6 +636,8 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
.rx_queue_setup = ixgbe_dev_rx_queue_setup,
.rx_queue_release = ixgbe_dev_rx_queue_release,
.rx_descriptor_done = ixgbe_dev_rx_descriptor_done,
+ .rx_descriptor_status = ixgbe_dev_rx_descriptor_status,
+ .tx_descriptor_status = ixgbe_dev_tx_descriptor_status,
.tx_queue_setup = ixgbe_dev_tx_queue_setup,
.tx_queue_release = ixgbe_dev_tx_queue_release,
.rx_queue_intr_enable = ixgbevf_dev_rx_queue_intr_enable,
@@ -744,6 +745,51 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_stats_strings[] = {
#define IXGBE_NB_HW_STATS (sizeof(rte_ixgbe_stats_strings) / \
sizeof(rte_ixgbe_stats_strings[0]))
+/* MACsec statistics */
+static const struct rte_ixgbe_xstats_name_off rte_ixgbe_macsec_strings[] = {
+ {"out_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_untagged)},
+ {"out_pkts_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_encrypted)},
+ {"out_pkts_protected", offsetof(struct ixgbe_macsec_stats,
+ out_pkts_protected)},
+ {"out_octets_encrypted", offsetof(struct ixgbe_macsec_stats,
+ out_octets_encrypted)},
+ {"out_octets_protected", offsetof(struct ixgbe_macsec_stats,
+ out_octets_protected)},
+ {"in_pkts_untagged", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_untagged)},
+ {"in_pkts_badtag", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_badtag)},
+ {"in_pkts_nosci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_nosci)},
+ {"in_pkts_unknownsci", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unknownsci)},
+ {"in_octets_decrypted", offsetof(struct ixgbe_macsec_stats,
+ in_octets_decrypted)},
+ {"in_octets_validated", offsetof(struct ixgbe_macsec_stats,
+ in_octets_validated)},
+ {"in_pkts_unchecked", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unchecked)},
+ {"in_pkts_delayed", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_delayed)},
+ {"in_pkts_late", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_late)},
+ {"in_pkts_ok", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_ok)},
+ {"in_pkts_invalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_invalid)},
+ {"in_pkts_notvalid", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notvalid)},
+ {"in_pkts_unusedsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_unusedsa)},
+ {"in_pkts_notusingsa", offsetof(struct ixgbe_macsec_stats,
+ in_pkts_notusingsa)},
+};
+
+#define IXGBE_NB_MACSEC_STATS (sizeof(rte_ixgbe_macsec_strings) / \
+ sizeof(rte_ixgbe_macsec_strings[0]))
+
/* Per-queue statistics */
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = {
{"mbuf_allocation_errors", offsetof(struct ixgbe_hw_stats, rnbc)},
@@ -859,6 +905,8 @@ ixgbe_pf_reset_hw(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
IXGBE_WRITE_FLUSH(hw);
+ if (status == IXGBE_ERR_SFP_NOT_PRESENT)
+ status = IXGBE_SUCCESS;
return status;
}
@@ -1083,7 +1131,8 @@ ixgbe_swfw_lock_reset(struct ixgbe_hw *hw)
static int
eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
@@ -1094,6 +1143,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_DCB_CFG(eth_dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(eth_dev->data->dev_private);
uint32_t ctrl_ext;
uint16_t csum;
int diag, i;
@@ -1103,6 +1154,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &ixgbe_prep_pkts;
/*
* For secondary processes, we don't initialise any further as primary
@@ -1127,9 +1179,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -1196,6 +1248,9 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
diag = ixgbe_init_hw(hw);
}
+ if (diag == IXGBE_ERR_SFP_NOT_PRESENT)
+ diag = IXGBE_SUCCESS;
+
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
"LOM. Please be aware there may be issues associated "
@@ -1272,20 +1327,37 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id);
- rte_intr_callback_register(&pci_dev->intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)eth_dev);
+ rte_intr_callback_register(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
/* enable uio/vfio intr/eventfd mapping */
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
/* enable support intr */
ixgbe_enable_intr(eth_dev);
+ /* initialize filter info */
+ memset(filter_info, 0,
+ sizeof(struct ixgbe_filter_info));
+
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ /* initialize flow director filter list & hash */
+ ixgbe_fdir_filter_init(eth_dev);
+
+ /* initialize l2 tunnel filter list & hash */
+ ixgbe_l2_tn_filter_init(eth_dev);
+
+ TAILQ_INIT(&filter_ntuple_list);
+ TAILQ_INIT(&filter_ethertype_list);
+ TAILQ_INIT(&filter_syn_list);
+ TAILQ_INIT(&filter_fdir_list);
+ TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&ixgbe_flow_list);
+
+ /* initialize bandwidth configuration info */
+ memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
return 0;
}
@@ -1293,7 +1365,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
PMD_INIT_FUNC_TRACE();
@@ -1302,7 +1375,6 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
return -EPERM;
hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- pci_dev = eth_dev->pci_dev;
if (hw->adapter_stopped == 0)
ixgbe_dev_close(eth_dev);
@@ -1315,9 +1387,9 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
ixgbe_swfw_lock_reset(hw);
/* disable uio intr before callback unregister */
- rte_intr_disable(&(pci_dev->intr_handle));
- rte_intr_callback_unregister(&(pci_dev->intr_handle),
- ixgbe_dev_interrupt_handler, (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbe_dev_interrupt_handler, eth_dev);
/* uninitialize PF if max_vfs not zero */
ixgbe_pf_host_uninit(eth_dev);
@@ -1328,9 +1400,154 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->hash_mac_addrs);
eth_dev->data->hash_mac_addrs = NULL;
+ /* remove all the fdir filters & hash */
+ ixgbe_fdir_filter_uninit(eth_dev);
+
+ /* remove all the L2 tunnel filters & hash */
+ ixgbe_l2_tn_filter_uninit(eth_dev);
+
+ /* Remove all ntuple filters of the device */
+ ixgbe_ntuple_filter_uninit(eth_dev);
+
+	/* clear all the filter lists */
+ ixgbe_filterlist_flush();
+
return 0;
}
+static int ixgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+ TAILQ_REMOVE(&filter_info->fivetuple_list,
+ p_5tuple,
+ entries);
+ rte_free(p_5tuple);
+ }
+ memset(filter_info->fivetuple_mask, 0,
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ if (fdir_info->hash_map)
+ rte_free(fdir_info->hash_map);
+ if (fdir_info->hash_handle)
+ rte_hash_free(fdir_info->hash_handle);
+
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ if (l2_tn_info->hash_map)
+ rte_free(l2_tn_info->hash_map);
+ if (l2_tn_info->hash_handle)
+ rte_hash_free(l2_tn_info->hash_handle);
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+ l2_tn_filter,
+ entries);
+ rte_free(l2_tn_filter);
+ }
+
+ return 0;
+}
+
+static int ixgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(eth_dev->data->dev_private);
+ char fdir_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters fdir_hash_params = {
+ .name = fdir_hash_name,
+ .entries = IXGBE_MAX_FDIR_FILTER_NUM,
+ .key_len = sizeof(union ixgbe_atr_input),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&fdir_info->fdir_list);
+ snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+ "fdir_%s", eth_dev->data->name);
+ fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+ if (!fdir_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+ return -EINVAL;
+ }
+ fdir_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_fdir_filter *) *
+ IXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+ if (!fdir_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for fdir hash map!");
+ return -ENOMEM;
+ }
+ fdir_info->mask_added = FALSE;
+
+ return 0;
+}
+
+static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(eth_dev->data->dev_private);
+ char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+ struct rte_hash_parameters l2_tn_hash_params = {
+ .name = l2_tn_hash_name,
+ .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
+ .key_len = sizeof(struct ixgbe_l2_tn_key),
+ .hash_func = rte_hash_crc,
+ .hash_func_init_val = 0,
+ .socket_id = rte_socket_id(),
+ };
+
+ TAILQ_INIT(&l2_tn_info->l2_tn_list);
+ snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+ "l2_tn_%s", eth_dev->data->name);
+ l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+ if (!l2_tn_info->hash_handle) {
+ PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+ return -EINVAL;
+ }
+ l2_tn_info->hash_map = rte_zmalloc("ixgbe",
+ sizeof(struct ixgbe_l2_tn_filter *) *
+ IXGBE_MAX_L2_TN_FILTER_NUM,
+ 0);
+ if (!l2_tn_info->hash_map) {
+ PMD_INIT_LOG(ERR,
+ "Failed to allocate memory for L2 TN hash map!");
+ return -ENOMEM;
+ }
+ l2_tn_info->e_tag_en = FALSE;
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ l2_tn_info->e_tag_ether_type = DEFAULT_ETAG_ETYPE;
+
+ return 0;
+}
/*
* Negotiate mailbox API version with the PF.
* After reset API version is always set to the basic one (ixgbe_mbox_api_10).
@@ -1381,7 +1598,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
{
int diag;
uint32_t tc, tcs;
- struct rte_pci_device *pci_dev;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
struct ixgbe_vfta *shadow_vfta =
@@ -1419,9 +1637,8 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
@@ -1513,10 +1730,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
return -EIO;
}
- rte_intr_callback_register(&pci_dev->intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)eth_dev);
- rte_intr_enable(&pci_dev->intr_handle);
+ rte_intr_callback_register(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
+ rte_intr_enable(intr_handle);
ixgbevf_intr_enable(hw);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
@@ -1531,8 +1747,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
@@ -1554,40 +1771,52 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
- rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&pci_dev->intr_handle,
- ixgbevf_dev_interrupt_handler,
- (void *)eth_dev);
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ ixgbevf_dev_interrupt_handler, eth_dev);
return 0;
}
-static struct eth_driver rte_ixgbe_pmd = {
- .pci_drv = {
- .id_table = pci_id_ixgbe_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ixgbe_dev_init,
- .eth_dev_uninit = eth_ixgbe_dev_uninit,
- .dev_private_size = sizeof(struct ixgbe_adapter),
+static int eth_ixgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbe_dev_init);
+}
+
+static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_ixgbe_pmd = {
+ .id_table = pci_id_ixgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_ixgbe_pci_probe,
+ .remove = eth_ixgbe_pci_remove,
};
+static int eth_ixgbevf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct ixgbe_adapter), eth_ixgbevf_dev_init);
+}
+
+static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_ixgbevf_dev_uninit);
+}
+
/*
* virtual function driver struct
*/
-static struct eth_driver rte_ixgbevf_pmd = {
- .pci_drv = {
- .id_table = pci_id_ixgbevf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_ixgbevf_dev_init,
- .eth_dev_uninit = eth_ixgbevf_dev_uninit,
- .dev_private_size = sizeof(struct ixgbe_adapter),
+static struct rte_pci_driver rte_ixgbevf_pmd = {
+ .id_table = pci_id_ixgbevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_ixgbevf_pci_probe,
+ .remove = eth_ixgbevf_pci_remove,
};
static int
@@ -1947,6 +2176,8 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
static int
ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
switch (nb_rx_q) {
case 1:
case 2:
@@ -1960,7 +2191,7 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
}
RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
return 0;
}
@@ -2180,6 +2411,80 @@ ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
}
}
+int
+ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_link link;
+ uint8_t nb_q_per_pool;
+ uint32_t queue_stride;
+ uint32_t queue_idx, idx = 0, vf_idx;
+ uint32_t queue_end;
+ uint16_t total_rate = 0;
+ struct rte_pci_device *pci_dev;
+
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ rte_eth_link_get_nowait(dev->data->port_id, &link);
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (tx_rate > link.link_speed)
+ return -EINVAL;
+
+ if (q_msk == 0)
+ return 0;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ queue_idx = vf * queue_stride;
+ queue_end = queue_idx + nb_q_per_pool - 1;
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo) {
+ for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+		/* Reset the stored TX rate of the VF if it would cause the
+		 * link speed to be exceeded.
+ */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
+
/*
* Configure device link speed and setup link.
* It returns 0 on success.
@@ -2191,7 +2496,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t intr_vector = 0;
int err, link_up = 0, negotiate = 0;
uint32_t speed = 0;
@@ -2253,7 +2559,7 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -2291,10 +2597,11 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
/* Restore vf rate limit */
if (vfinfo != NULL) {
- for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+ for (vf = 0; vf < pci_dev->max_vfs; vf++)
for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
if (vfinfo[vf].tx_rate[idx] != 0)
- ixgbe_set_vf_rate_limit(dev, vf,
+ ixgbe_set_vf_rate_limit(
+ dev, vf,
vfinfo[vf].tx_rate[idx],
1 << idx);
}
@@ -2366,13 +2673,13 @@ skip_link_setup:
/* check if lsc interrupt is enabled */
if (dev->data->dev_conf.intr_conf.lsc != 0)
ixgbe_dev_lsc_interrupt_setup(dev);
+ ixgbe_dev_macsec_interrupt_setup(dev);
} else {
rte_intr_callback_unregister(intr_handle,
- ixgbe_dev_interrupt_handler,
- (void *)dev);
+ ixgbe_dev_interrupt_handler, dev);
if (dev->data->dev_conf.intr_conf.lsc != 0)
PMD_INIT_LOG(INFO, "lsc won't enable because of"
- " no intr multiplex\n");
+ " no intr multiplex");
}
/* check if rxq interrupt is enabled */
@@ -2385,6 +2692,8 @@ skip_link_setup:
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
+ ixgbe_l2_tunnel_conf(dev);
+ ixgbe_filter_restore(dev);
return 0;
@@ -2405,10 +2714,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_vf_info *vfinfo =
*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
- struct ixgbe_filter_info *filter_info =
- IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- struct ixgbe_5tuple_filter *p_5tuple, *p_5tuple_next;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
PMD_INIT_FUNC_TRACE();
@@ -2423,8 +2730,7 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
/* stop adapter */
ixgbe_stop_adapter(hw);
- for (vf = 0; vfinfo != NULL &&
- vf < dev->pci_dev->max_vfs; vf++)
+ for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
vfinfo[vf].clear_to_send = false;
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) {
@@ -2445,17 +2751,6 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
memset(&link, 0, sizeof(link));
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
- /* Remove all ntuple filters of the device */
- for (p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list);
- p_5tuple != NULL; p_5tuple = p_5tuple_next) {
- p_5tuple_next = TAILQ_NEXT(p_5tuple, entries);
- TAILQ_REMOVE(&filter_info->fivetuple_list,
- p_5tuple, entries);
- rte_free(p_5tuple);
- }
- memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
-
if (!rte_intr_allow_others(intr_handle))
/* resume to the default handler */
rte_intr_callback_register(intr_handle,
@@ -2557,6 +2852,7 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
static void
ixgbe_read_stats_registers(struct ixgbe_hw *hw,
struct ixgbe_hw_stats *hw_stats,
+ struct ixgbe_macsec_stats *macsec_stats,
uint64_t *total_missed_rx, uint64_t *total_qbrc,
uint64_t *total_qprc, uint64_t *total_qprdc)
{
@@ -2564,9 +2860,9 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
uint32_t delta_gprc = 0;
unsigned i;
/* Workaround for RX byte count not including CRC bytes when CRC
-+ * strip is enabled. CRC bytes are removed from counters when crc_strip
+ * strip is enabled. CRC bytes are removed from counters when crc_strip
* is disabled.
-+ */
+ */
int crc_strip = (IXGBE_READ_REG(hw, IXGBE_HLREG0) &
IXGBE_HLREG0_RXCRCSTRP);
@@ -2726,6 +3022,40 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
/* Flow Director Stats registers */
hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+
+ /* MACsec Stats registers */
+ macsec_stats->out_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECTXUT);
+ macsec_stats->out_pkts_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTE);
+ macsec_stats->out_pkts_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXPKTP);
+ macsec_stats->out_octets_encrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTE);
+ macsec_stats->out_octets_protected +=
+ IXGBE_READ_REG(hw, IXGBE_LSECTXOCTP);
+ macsec_stats->in_pkts_untagged += IXGBE_READ_REG(hw, IXGBE_LSECRXUT);
+ macsec_stats->in_pkts_badtag += IXGBE_READ_REG(hw, IXGBE_LSECRXBAD);
+ macsec_stats->in_pkts_nosci += IXGBE_READ_REG(hw, IXGBE_LSECRXNOSCI);
+ macsec_stats->in_pkts_unknownsci +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXUNSCI);
+ macsec_stats->in_octets_decrypted +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTD);
+ macsec_stats->in_octets_validated +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOCTV);
+ macsec_stats->in_pkts_unchecked += IXGBE_READ_REG(hw, IXGBE_LSECRXUNCH);
+ macsec_stats->in_pkts_delayed += IXGBE_READ_REG(hw, IXGBE_LSECRXDELAY);
+ macsec_stats->in_pkts_late += IXGBE_READ_REG(hw, IXGBE_LSECRXLATE);
+ for (i = 0; i < 2; i++) {
+ macsec_stats->in_pkts_ok +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXOK(i));
+ macsec_stats->in_pkts_invalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXINV(i));
+ macsec_stats->in_pkts_notvalid +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNV(i));
+ }
+ macsec_stats->in_pkts_unusedsa += IXGBE_READ_REG(hw, IXGBE_LSECRXUNSA);
+ macsec_stats->in_pkts_notusingsa +=
+ IXGBE_READ_REG(hw, IXGBE_LSECRXNUSA);
}
/*
@@ -2738,6 +3068,9 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
unsigned i;
@@ -2746,8 +3079,8 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
total_qprc = 0;
total_qprdc = 0;
- ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
if (stats == NULL)
return;
@@ -2799,13 +3132,13 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
- return IXGBE_NB_HW_STATS +
+ return IXGBE_NB_HW_STATS + IXGBE_NB_MACSEC_STATS +
(IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
(IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
}
static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned int size)
{
const unsigned cnt_stats = ixgbe_xstats_calc_num();
unsigned stat, i, count;
@@ -2826,6 +3159,15 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
count++;
}
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2851,6 +3193,84 @@ static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
return cnt_stats;
}
+static int ixgbe_dev_xstats_get_names_by_id(
+ __rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ const uint64_t *ids,
+ unsigned int limit)
+{
+ if (!ids) {
+ const unsigned int cnt_stats = ixgbe_xstats_calc_num();
+ unsigned int stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_macsec_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ struct rte_eth_xstat_name xstats_names_copy[size];
+
+ ixgbe_dev_xstats_get_names_by_id(dev, xstats_names_copy, NULL,
+ size);
+
+ for (i = 0; i < limit; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ strcpy(xstats_names[i].name,
+ xstats_names_copy[ids[i]].name);
+ }
+ return limit;
+}
+
static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names, unsigned limit)
{
@@ -2875,6 +3295,9 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_stats *hw_stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
unsigned i, stat, count = 0;
@@ -2888,8 +3311,8 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
total_qprc = 0;
total_qprdc = 0;
- ixgbe_read_stats_registers(hw, hw_stats, &total_missed_rx, &total_qbrc,
- &total_qprc, &total_qprdc);
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats, &total_missed_rx,
+ &total_qbrc, &total_qprc, &total_qprdc);
/* If this is a reset xstats is NULL, and we have cleared the
* registers by reading them.
@@ -2906,6 +3329,14 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
count++;
}
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ xstats[count].value = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ xstats[count].id = count;
+ count++;
+ }
+
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
@@ -2930,11 +3361,105 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
return count;
}
+static int
+ixgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
+ uint64_t *values, unsigned int n)
+{
+ if (!ids) {
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_stats *hw_stats =
+ IXGBE_DEV_PRIVATE_TO_STATS(
+ dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
+ uint64_t total_missed_rx, total_qbrc, total_qprc, total_qprdc;
+ unsigned int i, stat, count = 0;
+
+ count = ixgbe_xstats_calc_num();
+
+ if (!ids && n < count)
+ return count;
+
+ total_missed_rx = 0;
+ total_qbrc = 0;
+ total_qprc = 0;
+ total_qprdc = 0;
+
+ ixgbe_read_stats_registers(hw, hw_stats, macsec_stats,
+ &total_missed_rx, &total_qbrc, &total_qprc,
+ &total_qprdc);
+
+ /* If this is a reset xstats is NULL, and we have cleared the
+ * registers by reading them.
+ */
+ if (!ids && !values)
+ return 0;
+
+ /* Extended stats from ixgbe_hw_stats */
+ count = 0;
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_stats_strings[i].offset);
+ count++;
+ }
+
+ /* MACsec Stats */
+ for (i = 0; i < IXGBE_NB_MACSEC_STATS; i++) {
+ values[count] = *(uint64_t *)(((char *)macsec_stats) +
+ rte_ixgbe_macsec_strings[i].offset);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_rxq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ values[count] =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_ixgbe_txq_strings[stat].offset +
+ (sizeof(uint64_t) * i));
+ count++;
+ }
+ }
+ return count;
+ }
+
+ uint16_t i;
+ uint16_t size = ixgbe_xstats_calc_num();
+ uint64_t values_copy[size];
+
+ ixgbe_dev_xstats_get_by_id(dev, NULL, values_copy, size);
+
+ for (i = 0; i < n; i++) {
+ if (ids[i] >= size) {
+ PMD_INIT_LOG(ERR, "id value isn't valid");
+ return -1;
+ }
+ values[i] = values_copy[ids[i]];
+ }
+ return n;
+}
+
static void
ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
struct ixgbe_hw_stats *stats =
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+ struct ixgbe_macsec_stats *macsec_stats =
+ IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(
+ dev->data->dev_private);
unsigned count = ixgbe_xstats_calc_num();
@@ -2943,6 +3468,7 @@ ixgbe_dev_xstats_reset(struct rte_eth_dev *dev)
/* Reset software totals */
memset(stats, 0, sizeof(*stats));
+ memset(macsec_stats, 0, sizeof(*macsec_stats));
}
static void
@@ -2991,6 +3517,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
@@ -3031,12 +3558,35 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
hw_stats->vfgotc = 0;
}
+static int
+ixgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u16 eeprom_verh, eeprom_verl;
+ u32 etrack_id;
+ int ret;
+
+ ixgbe_read_eeprom(hw, 0x2e, &eeprom_verh);
+ ixgbe_read_eeprom(hw, 0x2d, &eeprom_verl);
+
+ etrack_id = (eeprom_verh << 16) | eeprom_verl;
+ ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (u32)ret)
+ return ret;
+ else
+ return 0;
+}
+
static void
ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
@@ -3052,7 +3602,7 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS register */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
@@ -3073,6 +3623,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
!RTE_ETH_DEV_SRIOV(dev).active)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_MACSEC_STRIP;
+
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3086,6 +3640,10 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_TCP_TSO;
+ if (hw->mac.type == ixgbe_mac_82599EB ||
+ hw->mac.type == ixgbe_mac_X540)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
+
if (hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a)
@@ -3166,15 +3724,17 @@ static void
ixgbevf_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ dev_info->pci_dev = pci_dev;
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
if (hw->mac.type == ixgbe_mac_82598EB)
dev_info->max_vmdq_pools = ETH_16_POOLS;
else
@@ -3223,8 +3783,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int link_up;
int diag;
+ u32 speed = 0;
+ bool autoneg = false;
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
@@ -3234,6 +3798,14 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
hw->mac.get_link_status = true;
+ if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
+ ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+ speed = hw->phy.autoneg_advertised;
+ if (!speed)
+ ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+ ixgbe_setup_link(hw, speed, true);
+ }
+
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
@@ -3251,10 +3823,12 @@ ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
if (link_up == 0) {
rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+ intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
if (link.link_status == old.link_status)
return -1;
return 0;
}
+ intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -3381,6 +3955,28 @@ ixgbe_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
return 0;
}
+/**
+ * It enables the MACsec interrupt.
+ * It will be called only once during NIC initialization.
+ *
+ * @param dev
+ * Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_dev_macsec_interrupt_setup(struct rte_eth_dev *dev)
+{
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ intr->mask |= IXGBE_EICR_LINKSEC;
+
+ return 0;
+}
+
/*
* It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
*
@@ -3415,6 +4011,9 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
if (eicr & IXGBE_EICR_MAILBOX)
intr->flags |= IXGBE_FLAG_MAILBOX;
+ if (eicr & IXGBE_EICR_LINKSEC)
+ intr->flags |= IXGBE_FLAG_MACSEC;
+
if (hw->mac.type == ixgbe_mac_X550EM_x &&
hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X550EM_x))
@@ -3436,6 +4035,7 @@ ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
static void
ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -3451,10 +4051,10 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
(int)(dev->data->port_id));
}
PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
- dev->pci_dev->addr.domain,
- dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid,
- dev->pci_dev->addr.function);
+ pci_dev->addr.domain,
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
}
/*
@@ -3468,7 +4068,8 @@ ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
* - On failure, a negative value.
*/
static int
-ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
{
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
@@ -3506,19 +4107,20 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
ixgbe_dev_link_status_print(dev);
- intr->mask_original = intr->mask;
- /* only disable lsc interrupt */
- intr->mask &= ~IXGBE_EIMS_LSC;
if (rte_eal_alarm_set(timeout * 1000,
ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
- else
- intr->mask = intr->mask_original;
+ else {
+ /* remember original mask */
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
+ }
}
PMD_DRV_LOG(DEBUG, "enable intr immediately");
ixgbe_enable_intr(dev);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -3541,12 +4143,16 @@ static void
ixgbe_dev_interrupt_delayed_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_interrupt *intr =
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t eicr;
+ ixgbe_disable_intr(hw);
+
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (eicr & IXGBE_EICR_MAILBOX)
ixgbe_pf_mbx_process(dev);
@@ -3563,9 +4169,19 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
+ if (intr->flags & IXGBE_FLAG_MACSEC) {
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_MACSEC,
+ NULL);
+ intr->flags &= ~IXGBE_FLAG_MACSEC;
+ }
+
+ /* restore original mask */
+ intr->mask = intr->mask_original;
+ intr->mask_original = 0;
+
PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
ixgbe_enable_intr(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ rte_intr_enable(intr_handle);
}
/**
@@ -3581,13 +4197,12 @@ ixgbe_dev_interrupt_delayed_handler(void *param)
* void
*/
static void
-ixgbe_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ixgbe_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
ixgbe_dev_interrupt_get_status(dev);
- ixgbe_dev_interrupt_action(dev);
+ ixgbe_dev_interrupt_action(dev, dev->intr_handle);
}
static int
@@ -3951,7 +4566,7 @@ ixgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -3998,7 +4613,7 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
if (reta_size != sp_reta_size) {
PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
"(%d) doesn't match the number hardware can supported "
- "(%d)\n", reta_size, sp_reta_size);
+ "(%d)", reta_size, sp_reta_size);
return -EINVAL;
}
@@ -4023,14 +4638,15 @@ ixgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
return 0;
}
-static void
+static int
ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t pool)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t enable_addr = 1;
- ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+ return ixgbe_set_rar(hw, index, mac_addr->addr_bytes,
+ pool, enable_addr);
}
static void
@@ -4044,41 +4660,26 @@ ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
static void
ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+
ixgbe_remove_rar(dev, 0);
- ixgbe_add_rar(dev, addr, 0, 0);
+ ixgbe_add_rar(dev, addr, 0, pci_dev->max_vfs);
}
-int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
- struct ether_addr *mac_addr)
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
{
- struct ixgbe_hw *hw;
- struct ixgbe_vf_info *vfinfo;
- int rar_entry;
- uint8_t *new_mac = (uint8_t *)(mac_addr);
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
+ if (strcmp(dev->data->drv_name, drv->driver.name))
+ return false;
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ return true;
+}
- if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
- rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
- ETHER_ADDR_LEN);
- return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
- IXGBE_RAH_AV);
- }
- return -EINVAL;
+bool
+is_ixgbe_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_ixgbe_pmd);
}
static int
@@ -4089,6 +4690,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
ixgbe_dev_info_get(dev, &dev_info);
@@ -4099,7 +4701,7 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -4197,7 +4799,8 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t intr_vector = 0;
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int err, mask = 0;
@@ -4242,7 +4845,7 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
dev->data->nb_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
- " intr_vec\n", dev->data->nb_rx_queues);
+ " intr_vec", dev->data->nb_rx_queues);
return -ENOMEM;
}
}
@@ -4260,7 +4863,8 @@ static void
ixgbevf_dev_stop(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
PMD_INIT_FUNC_TRACE();
@@ -4401,15 +5005,15 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
}
}
-static int
-ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+int
+ixgbe_vt_check(struct ixgbe_hw *hw)
{
uint32_t reg_val;
- /* we only need to do this if VMDq is enabled */
+ /* if Virtualization Technology is enabled */
reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
- PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+ PMD_INIT_LOG(ERR, "VT must be enabled for this setting");
return -1;
}
@@ -4547,343 +5151,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
return new_val;
}
-static int
-ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
- uint16_t rx_mask, uint8_t on)
-{
- int val = 0;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
- " on 82599 hardware and newer");
- return -ENOTSUP;
- }
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
- if (on)
- vmolr |= val;
- else
- vmolr &= ~val;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFRE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFRE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
-{
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
-
- if (pool >= ETH_64_POOLS)
- return -EINVAL;
-
- /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
- if (pool >= 32) {
- addr = IXGBE_VFTE(1);
- val = bit1 << (pool - 32);
- } else {
- addr = IXGBE_VFTE(0);
- val = bit1 << pool;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-static int
-ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask, uint8_t vlan_on)
-{
- int ret = 0;
- uint16_t pool_idx;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vmdq_mode_check(hw) < 0)
- return -ENOTSUP;
- for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
- if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
- vlan_on, false);
- if (ret < 0)
- return ret;
- }
- }
-
- return ret;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
-
- mac->ops.set_vlan_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
- mac->ops.set_mac_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (vlan_id > 4095)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
- if (vlan_id) {
- ctrl = vlan_id;
- ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
- } else {
- ctrl = 0;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
- /* enable or disable VMDQ loopback */
- if (on)
- ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
- else
- ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- int i;
- int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- for (i = 0; i <= num_queues; i++) {
- reg_value = IXGBE_QDE_WRITE |
- (i << IXGBE_QDE_IDX_SHIFT) |
- (on & IXGBE_QDE_ENABLE);
- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
- }
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- /* only support VF's 0 to 63 */
- if ((vf >= dev_info.max_vfs) || (vf > 63))
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
- if (on)
- reg_value |= IXGBE_SRRCTL_DROP_EN;
- else
- reg_value &= ~IXGBE_SRRCTL_DROP_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct rte_eth_dev *dev;
- struct rte_eth_dev_info dev_info;
- uint16_t queues_per_pool;
- uint32_t q;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- rte_eth_dev_info_get(port, &dev_info);
-
- if (vf >= dev_info.max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
-
- /* The PF has 128 queue pairs and in SRIOV configuration
- * those queues will be assigned to VF's, so RXDCTL
- * registers will be dealing with queues which will be
- * assigned to VF's.
- * Let's say we have SRIOV configured with 31 VF's then the
- * first 124 queues 0-123 will be allocated to VF's and only
- * the last 4 queues 124-127 will be assigned to the PF.
- */
-
- queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
-
- for (q = 0; q < queues_per_pool; q++)
- (*dev->dev_ops->vlan_strip_queue_set)(dev,
- q + vf * queues_per_pool, on);
- return 0;
-}
-
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
@@ -4894,8 +5161,8 @@ rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
static int
ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t rule_id, uint8_t on)
+ struct rte_eth_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
{
uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
@@ -4918,7 +5185,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint8_t mirror_type = 0;
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
if (rule_id >= IXGBE_MAX_MIRROR_RULES)
@@ -4926,22 +5193,28 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
- mirror_conf->rule_type);
+ mirror_conf->rule_type);
return -EINVAL;
}
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
- /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
+ /* Check if vlan id is valid and find corresponding VLAN ID
+ * index in VLVF
+ */
for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
- /* search vlan id related pool vlan filter index */
- reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i],
- false);
+ /* search vlan id related pool vlan filter
+ * index
+ */
+ reg_index = ixgbe_find_vlvf_slot(
+ hw,
+ mirror_conf->vlan.vlan_id[i],
+ false);
if (reg_index < 0)
return -EINVAL;
- vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+ vlvf = IXGBE_READ_REG(hw,
+ IXGBE_VLVF(reg_index));
if ((vlvf & IXGBE_VLVF_VIEN) &&
((vlvf & IXGBE_VLVF_VLANID_MASK) ==
mirror_conf->vlan.vlan_id[i]))
@@ -4971,7 +5244,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
}
}
- /*
+ /**
* if enable pool mirror, write related pool mask register, if disable
* pool mirror, clear PFMRVM register
*/
@@ -5001,8 +5274,9 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
mr_ctl |= mirror_type;
mr_ctl &= mirror_rule_mask;
mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
- } else
+ } else {
mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+ }
mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
@@ -5039,11 +5313,11 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
- if (ixgbe_vmdq_mode_check(hw) < 0)
+ if (ixgbe_vt_check(hw) < 0)
return -ENOTSUP;
memset(&mr_info->mr_conf[rule_id], 0,
- sizeof(struct rte_eth_mirror_conf));
+ sizeof(struct rte_eth_mirror_conf));
/* clear PFVMCTL register */
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
@@ -5062,6 +5336,8 @@ ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
static int
ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5071,7 +5347,7 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
RTE_SET_USED(queue_id);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5094,6 +5370,8 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
static int
ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t mask;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5113,7 +5391,7 @@ ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
mask &= (1 << (queue_id - 32));
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
}
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(intr_handle);
return 0;
}
@@ -5217,7 +5495,8 @@ ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
static void
ixgbevf_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t q_idx;
@@ -5250,7 +5529,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
static void
ixgbe_configure_msix(struct rte_eth_dev *dev)
{
- struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
@@ -5365,62 +5645,7 @@ static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
}
-static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vf_info *vfinfo =
- *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
- uint32_t queue_stride =
- IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
- uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
- uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
- uint16_t total_rate = 0;
-
- if (queue_end >= hw->mac.max_tx_queues)
- return -EINVAL;
-
- if (vfinfo != NULL) {
- for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
- if (vf_idx == vf)
- continue;
- for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
- idx++)
- total_rate += vfinfo[vf_idx].tx_rate[idx];
- }
- } else
- return -EINVAL;
-
- /* Store tx_rate for this vf. */
- for (idx = 0; idx < nb_q_per_pool; idx++) {
- if (((uint64_t)0x1 << idx) & q_msk) {
- if (vfinfo[vf].tx_rate[idx] != tx_rate)
- vfinfo[vf].tx_rate[idx] = tx_rate;
- total_rate += tx_rate;
- }
- }
-
- if (total_rate > dev->data->dev_link.link_speed) {
- /*
- * Reset stored TX rate of the VF if it causes exceed
- * link speed.
- */
- memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
- return -EINVAL;
- }
-
- /* Set RTTBCNRC of each queue/pool for vf X */
- for (; queue_idx <= queue_end; queue_idx++) {
- if (0x1 & q_msk)
- ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
- q_msk = q_msk >> 1;
- }
-
- return 0;
-}
-
-static void
+static int
ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
__attribute__((unused)) uint32_t index,
__attribute__((unused)) uint32_t pool)
@@ -5434,11 +5659,19 @@ ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
* set of PF resources used to store VF MAC addresses.
*/
if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
- return;
+ return -1;
diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
- if (diag == 0)
- return;
- PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR, "Unable to add MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x - diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ return diag;
}
static void
@@ -5497,28 +5730,24 @@ ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
}
-#define MAC_TYPE_FILTER_SUP(type) do {\
- if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
- (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
- (type) != ixgbe_mac_X550EM_a)\
- return -ENOTSUP;\
-} while (0)
-
-static int
+int
ixgbe_syn_filter_set(struct rte_eth_dev *dev,
struct rte_eth_syn_filter *filter,
bool add)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t syn_info;
uint32_t synqf;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ syn_info = filter_info->syn_info;
if (add) {
- if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ if (syn_info & IXGBE_SYN_FILTER_ENABLE)
return -EINVAL;
synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
@@ -5528,10 +5757,13 @@ ixgbe_syn_filter_set(struct rte_eth_dev *dev,
else
synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
} else {
- if (!(synqf & IXGBE_SYN_FILTER_ENABLE))
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
return -ENOENT;
synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
}
+
+ filter_info->syn_info = synqf;
IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
IXGBE_WRITE_FLUSH(hw);
return 0;
@@ -5587,7 +5819,7 @@ ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
(struct rte_eth_syn_filter *)arg);
break;
default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op);
ret = -EINVAL;
break;
}
@@ -5609,6 +5841,52 @@ convert_protocol_type(uint8_t protocol_value)
return IXGBE_FILTER_PROTOCOL_NONE;
}
+/* inject a 5-tuple filter to HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ i = filter->index;
+
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+}
+
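
Note: ixgbe_inject_5tuple_filter() only writes an already populated node into the DAQF/SAQF/SDPQF/FTQF/L34T_IMIR registers; index allocation and list bookkeeping stay in ixgbe_add_5tuple_filter() below. A minimal sketch of how a caller might fill in a node first; the function name and IP arguments are illustrative and byte-ordering details are omitted.

    /* Illustrative only: steer one TCP flow to Rx queue 3. */
    static int
    example_add_5tuple(struct rte_eth_dev *dev, uint32_t dst_ip, uint32_t src_ip)
    {
        struct ixgbe_5tuple_filter *f;
        int ret;

        f = rte_zmalloc("example_5tuple", sizeof(*f), 0);
        if (f == NULL)
            return -ENOMEM;

        f->filter_info.dst_ip = dst_ip;
        f->filter_info.src_ip = src_ip;
        f->filter_info.dst_port = 80;
        f->filter_info.proto = 6;              /* TCP */
        f->filter_info.priority = 1;
        /* In these mask fields 0 means "compare the field" (see above). */
        f->filter_info.dst_ip_mask = 0;
        f->filter_info.src_ip_mask = 0;
        f->filter_info.dst_port_mask = 0;
        f->filter_info.src_port_mask = 1;      /* ignore the source port */
        f->filter_info.proto_mask = 0;
        f->queue = 3;

        /* Picks a free index, links the node, then calls the inject helper. */
        ret = ixgbe_add_5tuple_filter(dev, f);
        if (ret < 0)
            rte_free(f);
        return ret;
    }
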
/*
* add a 5tuple filter
*
@@ -5626,13 +5904,9 @@ static int
ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
struct ixgbe_5tuple_filter *filter)
{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_filter_info *filter_info =
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
int i, idx, shift;
- uint32_t ftqf, sdpqf;
- uint32_t l34timir = 0;
- uint8_t mask = 0xff;
/*
* look for an unused 5tuple filter index,
@@ -5655,37 +5929,8 @@ ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
return -ENOSYS;
}
- sdpqf = (uint32_t)(filter->filter_info.dst_port <<
- IXGBE_SDPQF_DSTPORT_SHIFT);
- sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
-
- ftqf = (uint32_t)(filter->filter_info.proto &
- IXGBE_FTQF_PROTOCOL_MASK);
- ftqf |= (uint32_t)((filter->filter_info.priority &
- IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
- if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
- mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
- if (filter->filter_info.dst_ip_mask == 0)
- mask &= IXGBE_FTQF_DEST_ADDR_MASK;
- if (filter->filter_info.src_port_mask == 0)
- mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
- if (filter->filter_info.dst_port_mask == 0)
- mask &= IXGBE_FTQF_DEST_PORT_MASK;
- if (filter->filter_info.proto_mask == 0)
- mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
- ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
- ftqf |= IXGBE_FTQF_POOL_MASK_EN;
- ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
-
- IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+ ixgbe_inject_5tuple_filter(dev, filter);
- l34timir |= IXGBE_L34T_IMIR_RESERVE;
- l34timir |= (uint32_t)(filter->queue <<
- IXGBE_L34T_IMIR_QUEUE_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
return 0;
}
@@ -5722,6 +5967,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5731,7 +5977,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* refuse mtu that requires the support of scattered packets when this
* feature has not been enabled before.
*/
- if (!dev->data->scattered_rx &&
+ if (!rx_conf->enable_scatter &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
@@ -5752,11 +5998,6 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
}
-#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
- if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
- return -ENOTSUP;\
-} while (0)
-
static inline struct ixgbe_5tuple_filter *
ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
struct ixgbe_5tuple_filter_info *key)
@@ -5864,7 +6105,7 @@ ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
* - On success, zero.
* - On failure, a negative value.
*/
-static int
+int
ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
struct rte_eth_ntuple_filter *ntuple_filter,
bool add)
@@ -6009,48 +6250,7 @@ ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
return ret;
}
-static inline int
-ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (filter_info->ethertype_filters[i] == ethertype &&
- (filter_info->ethertype_mask & (1 << i)))
- return i;
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
- uint16_t ethertype)
-{
- int i;
-
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] = ethertype;
- return i;
- }
- }
- return -1;
-}
-
-static inline int
-ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
- uint8_t idx)
-{
- if (idx >= IXGBE_MAX_ETQF_FILTERS)
- return -1;
- filter_info->ethertype_mask &= ~(1 << idx);
- filter_info->ethertype_filters[idx] = 0;
- return idx;
-}
-
-static int
+int
ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add)
@@ -6061,6 +6261,7 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
uint32_t etqf = 0;
uint32_t etqs = 0;
int ret;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
return -EINVAL;
@@ -6094,18 +6295,23 @@ ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
}
if (add) {
- ret = ixgbe_ethertype_filter_insert(filter_info,
- filter->ether_type);
- if (ret < 0) {
- PMD_DRV_LOG(ERR, "ethertype filters are full.");
- return -ENOSYS;
- }
etqf = IXGBE_ETQF_FILTER_EN;
etqf |= (uint32_t)filter->ether_type;
etqs |= (uint32_t)((filter->queue <<
IXGBE_ETQS_RX_QUEUE_SHIFT) &
IXGBE_ETQS_RX_QUEUE);
etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
} else {
ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
if (ret < 0)
@@ -6201,7 +6407,7 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = -EINVAL;
+ int ret = 0;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
@@ -6219,9 +6425,15 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_L2_TUNNEL:
ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ixgbe_flow_ops;
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
+ ret = -EINVAL;
break;
}
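
Note: the new RTE_ETH_FILTER_GENERIC case is the hook the rte_flow layer uses to discover a PMD's flow ops. A minimal sketch of that query, which the generic ethdev/rte_flow code performs on behalf of applications (the wrapper function is illustrative; requires rte_ethdev.h and rte_flow.h):

    /* Illustrative only: ask the port for its generic flow (rte_flow) ops. */
    static const struct rte_flow_ops *
    example_get_flow_ops(uint8_t port_id)
    {
        const struct rte_flow_ops *ops = NULL;

        if (rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
                                    RTE_ETH_FILTER_GET, &ops) != 0)
            return NULL;
        return ops;        /* &ixgbe_flow_ops for this driver */
    }
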
@@ -6869,12 +7081,15 @@ ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
if (l2_tunnel == NULL)
return -EINVAL;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
break;
default:
@@ -6913,9 +7128,12 @@ ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
ret = ixgbe_e_tag_enable(hw);
break;
default:
@@ -6954,9 +7172,12 @@ ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
{
int ret = 0;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
ret = ixgbe_e_tag_disable(hw);
break;
default:
@@ -7045,12 +7266,108 @@ ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
return -EINVAL;
}
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
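
Note: the lookup/insert/remove helpers above assume that l2_tn_info->hash_handle, hash_map and l2_tn_list were set up during device init (that code is outside this hunk). A minimal sketch of the kind of initialization they rely on, using the public rte_hash API; the table name, the choice of rte_jhash and the use of IXGBE_MAX_L2_TN_FILTER_NUM are assumptions for illustration (requires rte_hash.h, rte_jhash.h and rte_malloc.h):

    /* Illustrative only: cuckoo hash keyed on ixgbe_l2_tn_key plus a parallel
     * array mapping the returned slot index back to the filter node. */
    static int
    example_l2_tn_hash_init(struct ixgbe_l2_tn_info *l2_tn_info)
    {
        struct rte_hash_parameters params = {
            .name = "example_l2_tn_hash",
            .entries = IXGBE_MAX_L2_TN_FILTER_NUM,
            .key_len = sizeof(struct ixgbe_l2_tn_key),
            .hash_func = rte_jhash,
            .hash_func_init_val = 0,
            .socket_id = rte_socket_id(),
        };

        TAILQ_INIT(&l2_tn_info->l2_tn_list);

        l2_tn_info->hash_handle = rte_hash_create(&params);
        if (l2_tn_info->hash_handle == NULL)
            return -EINVAL;

        l2_tn_info->hash_map = rte_zmalloc("example_l2_tn_map",
                sizeof(struct ixgbe_l2_tn_filter *) *
                IXGBE_MAX_L2_TN_FILTER_NUM, 0);
        if (l2_tn_info->hash_map == NULL) {
            rte_hash_free(l2_tn_info->hash_handle);
            return -ENOMEM;
        }
        return 0;
    }
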
+
/* Add l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
- struct rte_eth_l2_tunnel_conf *l2_tunnel)
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+ struct ixgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("ixgbe_l2_tn",
+ sizeof(struct ixgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ (void)rte_memcpy(&node->key,
+ &key,
+ sizeof(struct ixgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7062,15 +7379,27 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
break;
}
+ if ((!restore) && (ret < 0))
+ (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
return ret;
}
/* Delete l2 tunnel filter */
-static int
+int
ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel)
{
- int ret = 0;
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
switch (l2_tunnel->l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
@@ -7096,7 +7425,7 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
enum rte_filter_op filter_op,
void *arg)
{
- int ret = 0;
+ int ret;
if (filter_op == RTE_ETH_FILTER_NOP)
return 0;
@@ -7111,7 +7440,8 @@ ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
case RTE_ETH_FILTER_ADD:
ret = ixgbe_dev_l2_tunnel_filter_add
(dev,
- (struct rte_eth_l2_tunnel_conf *)arg);
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
break;
case RTE_ETH_FILTER_DELETE:
ret = ixgbe_dev_l2_tunnel_filter_del
@@ -7154,10 +7484,13 @@ ixgbe_dev_l2_tunnel_forwarding_enable
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
break;
default:
@@ -7175,10 +7508,13 @@ ixgbe_dev_l2_tunnel_forwarding_disable
(struct rte_eth_dev *dev,
enum rte_eth_tunnel_type l2_tunnel_type)
{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
int ret = 0;
switch (l2_tunnel_type) {
case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
break;
default:
@@ -7195,15 +7531,16 @@ ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
struct rte_eth_l2_tunnel_conf *l2_tunnel,
bool en)
{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
int ret = 0;
uint32_t vmtir, vmvir;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (l2_tunnel->vf_id >= dev->pci_dev->max_vfs) {
+ if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
PMD_DRV_LOG(ERR,
"VF id %u should be less than %u",
l2_tunnel->vf_id,
- dev->pci_dev->max_vfs);
+ pci_dev->max_vfs);
return -EINVAL;
}
@@ -7529,7 +7866,7 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
}
static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
@@ -7584,8 +7921,7 @@ ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
}
static void
-ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+ixgbevf_dev_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
@@ -7593,7 +7929,226 @@ ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
ixgbevf_dev_interrupt_action(dev);
}
-RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+/**
+ * ixgbe_disable_sec_tx_path_generic - Stops the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Stops the transmit data path and waits for the HW to internally empty
+ * the Tx security block
+ **/
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+#define IXGBE_MAX_SECTX_POLL 40
+
+ int i;
+ int sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg |= IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ for (i = 0; i < IXGBE_MAX_SECTX_POLL; i++) {
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
+ if (sectxreg & IXGBE_SECTXSTAT_SECTX_RDY)
+ break;
+ /* Use interrupt-safe sleep just in case */
+ usec_delay(1000);
+ }
+
+ /* For informational purposes only */
+ if (i >= IXGBE_MAX_SECTX_POLL)
+ PMD_DRV_LOG(DEBUG, "Tx unit being enabled before security "
+ "path fully disabled. Continuing with init.");
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_enable_sec_tx_path_generic - Enables the transmit data path
+ * @hw: pointer to hardware structure
+ *
+ * Enables the transmit data path.
+ **/
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
+{
+ uint32_t sectxreg;
+
+ sectxreg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ sectxreg &= ~IXGBE_SECTXCTRL_TX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, sectxreg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return IXGBE_SUCCESS;
+}
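
Note: these two helpers are intended to bracket register programming that must not race with traffic flowing through the Tx security block (MACsec). A minimal sketch of the expected call pattern; the middle step is a placeholder, not the driver's actual MACsec sequence.

    /* Illustrative only: quiesce, program, then re-enable the Tx data path. */
    static void
    example_update_sec_block(struct ixgbe_hw *hw)
    {
        /* Stop Tx and wait for the security block to drain (polled above). */
        ixgbe_disable_sec_tx_path_generic(hw);

        /* ... write SECTX/MACsec configuration registers here ... */

        /* Resume the Tx data path once programming is done. */
        ixgbe_enable_sec_tx_path_generic(hw);
    }
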
+
+/* restore n-tuple filter */
+static inline void
+ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *node;
+
+ TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+ ixgbe_inject_5tuple_filter(dev, node);
+ }
+}
+
+/* restore ethernet type filter */
+static inline void
+ixgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i)) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i),
+ filter_info->ethertype_filters[i].etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i),
+ filter_info->ethertype_filters[i].etqs);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* restore SYN filter */
+static inline void
+ixgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t synqf;
+
+ synqf = filter_info->syn_info;
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* restore L2 tunnel filter */
+static inline void
+ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *node;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+
+ TAILQ_FOREACH(node, &l2_tn_info->l2_tn_list, entries) {
+ l2_tn_conf.l2_tunnel_type = node->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = node->key.tn_id;
+ l2_tn_conf.pool = node->pool;
+ (void)ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_conf, TRUE);
+ }
+}
+
+static int
+ixgbe_filter_restore(struct rte_eth_dev *dev)
+{
+ ixgbe_ntuple_filter_restore(dev);
+ ixgbe_ethertype_filter_restore(dev);
+ ixgbe_syn_filter_restore(dev);
+ ixgbe_fdir_filter_restore(dev);
+ ixgbe_l2_tn_filter_restore(dev);
+
+ return 0;
+}
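
Note: ixgbe_filter_restore() replays every software-cached filter (n-tuple, ethertype, SYN, flow director, L2 tunnel) into hardware after a reset has wiped the registers. A minimal sketch of the call site this is designed for, near the end of port start; the surrounding start logic is abbreviated and the exact placement is an assumption.

    /* Illustrative only: tail of a dev_start-style sequence. */
    static int
    example_dev_start_tail(struct rte_eth_dev *dev)
    {
        /* ... PHY/link setup, queue start, interrupt enable ... */

        /* Re-apply cached E-tag settings, then replay all cached filters. */
        ixgbe_l2_tunnel_conf(dev);
        ixgbe_filter_restore(dev);

        return 0;
    }
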
+
+static void
+ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tn_info->e_tag_en)
+ (void)ixgbe_e_tag_enable(hw);
+
+ if (l2_tn_info->e_tag_fwd_en)
+ (void)ixgbe_e_tag_forwarding_en_dis(dev, 1);
+
+ (void)ixgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
+/* remove all the n-tuple filters */
+void
+ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter *p_5tuple;
+
+ while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+ ixgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
+/* remove all the ether type filters */
+void
+ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_mask & (1 << i) &&
+ !filter_info->ethertype_filters[i].conf) {
+ (void)ixgbe_ethertype_filter_remove(filter_info,
+ (uint8_t)i);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(i), 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+ }
+}
+
+/* remove the SYN filter */
+void
+ixgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->syn_info & IXGBE_SYN_FILTER_ENABLE) {
+ filter_info->syn_info = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, 0);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+}
+
+/* remove all the L2 tunnel filters */
+int
+ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_conf;
+ int ret = 0;
+
+ while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+ l2_tn_conf.l2_tunnel_type = l2_tn_filter->key.l2_tn_type;
+ l2_tn_conf.tunnel_id = l2_tn_filter->key.tn_id;
+ l2_tn_conf.pool = l2_tn_filter->pool;
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_conf);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
-RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe_vf, "* igb_uio | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a4e2996a..b576a6f4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,11 +38,14 @@
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#include <rte_time.h>
+#include <rte_hash.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
/*
* Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,10 +133,28 @@
#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
+#define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
+#define IXGBE_MAX_L2_TN_FILTER_NUM 128
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+ (type) != ixgbe_mac_X550EM_a)\
+ return -ENOTSUP;\
+} while (0)
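
Note: both guard macros expand to an early return of -ENOTSUP, so they can only be used inside functions that return int. A minimal usage sketch (the function name is illustrative):

    /* Illustrative only: bail out early on MACs that lack the filter block. */
    static int
    example_filter_op(struct ixgbe_hw *hw)
    {
        MAC_TYPE_FILTER_SUP(hw->mac.type);  /* returns -ENOTSUP on e.g. 82598 */
        /* ... program the filter registers here ... */
        return 0;
    }
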
+
/*
* Information about the fdir mode.
*/
-
struct ixgbe_hw_fdir_mask {
uint16_t vlan_tci_mask;
uint32_t src_ipv4_mask;
@@ -148,6 +169,28 @@ struct ixgbe_hw_fdir_mask {
uint8_t tunnel_type_mask;
};
+struct ixgbe_fdir_filter {
+ TAILQ_ENTRY(ixgbe_fdir_filter) entries;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
+
+struct ixgbe_fdir_rule {
+ struct ixgbe_hw_fdir_mask mask;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* a unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+};
+
struct ixgbe_hw_fdir_info {
struct ixgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -159,6 +202,11 @@ struct ixgbe_hw_fdir_info {
uint64_t remove;
uint64_t f_add;
uint64_t f_remove;
+ struct ixgbe_fdir_filter_list fdir_list; /* filter list*/
+ /* store the pointers of the filters, index is the hash value. */
+ struct ixgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handler */
+ bool mask_added; /* If already got mask from consistent filter */
};
/* structure for interrupt relative data */
@@ -254,16 +302,136 @@ struct ixgbe_5tuple_filter {
(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
(sizeof(uint32_t) * NBBY))
+struct ixgbe_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+ uint32_t etqs;
+ /**
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+ bool conf;
+};
+
/*
* Structure to store filters' info.
*/
struct ixgbe_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+ struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
struct ixgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+};
+
+struct ixgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct ixgbe_l2_tn_filter {
+ TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
+ struct ixgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);
+
+struct ixgbe_l2_tn_info {
+ struct ixgbe_l2_tn_filter_list l2_tn_list;
+ struct ixgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ uint16_t e_tag_ether_type; /* ether type for e-tag */
+};
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+struct ixgbe_ntuple_filter_list filter_ntuple_list;
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+struct ixgbe_ethertype_filter_list filter_ethertype_list;
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+struct ixgbe_syn_filter_list filter_syn_list;
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+struct ixgbe_flow_mem_list ixgbe_flow_list;
+
+/*
+ * Statistics counters collected by the MACsec
+ */
+struct ixgbe_macsec_stats {
+ /* TX port statistics */
+ uint64_t out_pkts_untagged;
+ uint64_t out_pkts_encrypted;
+ uint64_t out_pkts_protected;
+ uint64_t out_octets_encrypted;
+ uint64_t out_octets_protected;
+
+ /* RX port statistics */
+ uint64_t in_pkts_untagged;
+ uint64_t in_pkts_badtag;
+ uint64_t in_pkts_nosci;
+ uint64_t in_pkts_unknownsci;
+ uint64_t in_octets_decrypted;
+ uint64_t in_octets_validated;
+
+ /* RX SC statistics */
+ uint64_t in_pkts_unchecked;
+ uint64_t in_pkts_delayed;
+ uint64_t in_pkts_late;
+
+ /* RX SA statistics */
+ uint64_t in_pkts_ok;
+ uint64_t in_pkts_invalid;
+ uint64_t in_pkts_notvalid;
+ uint64_t in_pkts_unusedsa;
+ uint64_t in_pkts_notusingsa;
+};
+
+/* The configuration of bandwidth */
+struct ixgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
};
/*
@@ -272,6 +440,7 @@ struct ixgbe_filter_info {
struct ixgbe_adapter {
struct ixgbe_hw hw;
struct ixgbe_hw_stats stats;
+ struct ixgbe_macsec_stats macsec_stats;
struct ixgbe_hw_fdir_info fdir;
struct ixgbe_interrupt intr;
struct ixgbe_stat_mapping_registers stat_mappings;
@@ -285,6 +454,8 @@ struct ixgbe_adapter {
struct ixgbe_bypass_info bps;
#endif /* RTE_NIC_BYPASS */
struct ixgbe_filter_info filter;
+ struct ixgbe_l2_tn_info l2_tn;
+ struct ixgbe_bw_conf bw_conf;
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
@@ -293,12 +464,18 @@ struct ixgbe_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+#define IXGBE_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
(&((struct ixgbe_adapter *)adapter)->stats)
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
(&((struct ixgbe_adapter *)adapter)->intr)
@@ -329,6 +506,12 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct ixgbe_adapter *)adapter)->filter)
+#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->l2_tn)
+
+#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->bw_conf)
+
/*
* RX/TX function prototypes
*/
@@ -353,7 +536,9 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
@@ -398,6 +583,9 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
@@ -414,10 +602,31 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
+int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_flush(void);
/*
* Flow director function prototypes
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
+ bool del, bool update);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
@@ -444,4 +653,74 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
enum rte_filter_op filter_op, void *arg);
+void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
+extern const struct rte_flow_ops ixgbe_flow_ops;
+
+void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+ struct ixgbe_ethertype_filter *ethertype_filter)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype =
+ ethertype_filter->ethertype;
+ filter_info->ethertype_filters[i].etqf =
+ ethertype_filter->etqf;
+ filter_info->ethertype_filters[i].etqs =
+ ethertype_filter->etqs;
+ filter_info->ethertype_filters[i].conf =
+ ethertype_filter->conf;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= IXGBE_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].conf = FALSE;
+ return idx;
+}
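
Note: these inline helpers manage only the software cache in ixgbe_filter_info; the caller is still responsible for the ETQF/ETQS register writes, as ixgbe_add_del_ethertype_filter() in ixgbe_ethdev.c does. A minimal sketch of the insert-side pairing (the function name is illustrative):

    /* Illustrative only: cache an ethertype filter, then program the slot. */
    static int
    example_add_etype(struct ixgbe_hw *hw, struct ixgbe_filter_info *filter_info,
                      uint16_t ether_type, uint16_t queue)
    {
        struct ixgbe_ethertype_filter ef;
        int idx;

        ef.ethertype = ether_type;
        ef.etqf = IXGBE_ETQF_FILTER_EN | ether_type;
        ef.etqs = ((uint32_t)queue << IXGBE_ETQS_RX_QUEUE_SHIFT) &
                  IXGBE_ETQS_RX_QUEUE;
        ef.etqs |= IXGBE_ETQS_QUEUE_EN;
        ef.conf = FALSE;                   /* runtime filter, may be removed */

        idx = ixgbe_ethertype_filter_insert(filter_info, &ef);
        if (idx < 0)
            return -ENOSPC;

        IXGBE_WRITE_REG(hw, IXGBE_ETQF(idx), ef.etqf);
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(idx), ef.etqs);
        IXGBE_WRITE_FLUSH(hw);
        return idx;
    }
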
+
#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 4b81ee37..7f6c7b58 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -43,6 +43,7 @@
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_malloc.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -111,10 +112,8 @@
static int fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash);
static int fdir_set_input_mask(struct rte_eth_dev *dev,
const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
-static int fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask);
+static int fdir_set_input_mask_82599(struct rte_eth_dev *dev);
+static int fdir_set_input_mask_x550(struct rte_eth_dev *dev);
static int ixgbe_set_fdir_flex_conf(struct rte_eth_dev *dev,
const struct rte_eth_fdir_flex_conf *conf, uint32_t *fdirctrl);
static int fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl);
@@ -294,8 +293,7 @@ reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword)
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_82599(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_82599(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -307,8 +305,6 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
uint32_t fdirtcpm; /* TCP source and destination port masks. */
uint32_t fdiripv6m; /* IPv6 source and destination masks. */
- uint16_t dst_ipv6m = 0;
- uint16_t src_ipv6m = 0;
volatile uint32_t *reg;
PMD_INIT_FUNC_TRACE();
@@ -319,31 +315,30 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* a VLAN of 0 is unspecified, so mask that out as well. L4type
* cannot be masked out in this implementation.
*/
- if (input_mask->dst_port_mask == 0 && input_mask->src_port_mask == 0)
+ if (info->mask.dst_port_mask == 0 && info->mask.src_port_mask == 0)
/* use the L4 protocol mask for raw IPv4/IPv6 traffic */
fdirm |= IXGBE_FDIRM_L4P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
fdirtcpm = reverse_fdir_bitmasks(
- rte_be_to_cpu_16(input_mask->dst_port_mask),
- rte_be_to_cpu_16(input_mask->src_port_mask));
+ rte_be_to_cpu_16(info->mask.dst_port_mask),
+ rte_be_to_cpu_16(info->mask.src_port_mask));
/* write all the same so that UDP, TCP and SCTP use the same mask
* (little-endian)
@@ -351,30 +346,23 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
- info->mask.src_port_mask = input_mask->src_port_mask;
- info->mask.dst_port_mask = input_mask->dst_port_mask;
/* Store source and destination IPv4 masks (big-endian),
* can not use IXGBE_WRITE_REG.
*/
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRSIP4M);
- *reg = ~(input_mask->ipv4_mask.src_ip);
+ *reg = ~(info->mask.src_ipv4_mask);
reg = IXGBE_PCI_REG_ADDR(hw, IXGBE_FDIRDIP4M);
- *reg = ~(input_mask->ipv4_mask.dst_ip);
- info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
- info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ *reg = ~(info->mask.dst_ipv4_mask);
if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_SIGNATURE) {
/*
* Store source and destination IPv6 masks (bit reversed)
*/
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
- IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
- fdiripv6m = (dst_ipv6m << 16) | src_ipv6m;
+ fdiripv6m = (info->mask.dst_ipv6_mask << 16) |
+ info->mask.src_ipv6_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, ~fdiripv6m);
- info->mask.src_ipv6_mask = src_ipv6m;
- info->mask.dst_ipv6_mask = dst_ipv6m;
}
return IXGBE_SUCCESS;
@@ -385,8 +373,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev,
* but makes use of the rte_fdir_masks structure to see which bits to set.
*/
static int
-fdir_set_input_mask_x550(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+fdir_set_input_mask_x550(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_hw_fdir_info *info =
@@ -409,20 +396,19 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
/* some bits must be set for mac vlan or tunnel mode */
fdirm |= IXGBE_FDIRM_L4P | IXGBE_FDIRM_L3P;
- if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
+ if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0x0FFF))
/* mask VLAN Priority */
fdirm |= IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask == rte_cpu_to_be_16(0xE000))
+ else if (info->mask.vlan_tci_mask == rte_cpu_to_be_16(0xE000))
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- else if (input_mask->vlan_tci_mask == 0)
+ else if (info->mask.vlan_tci_mask == 0)
/* mask VLAN ID and Priority */
fdirm |= IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP;
- else if (input_mask->vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
+ else if (info->mask.vlan_tci_mask != rte_cpu_to_be_16(0xEFFF)) {
PMD_INIT_LOG(ERR, "invalid vlan_tci_mask");
return -EINVAL;
}
- info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
@@ -433,12 +419,11 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
IXGBE_FDIRIP6M_TNI_VNI;
if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
- mac_mask = input_mask->mac_addr_byte_mask;
+ mac_mask = info->mask.mac_addr_byte_mask;
fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
& IXGBE_FDIRIP6M_INNER_MAC;
- info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
- switch (input_mask->tunnel_type_mask) {
+ switch (info->mask.tunnel_type_mask) {
case 0:
/* Mask tunnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
@@ -449,10 +434,8 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_type_mask");
return -EINVAL;
}
- info->mask.tunnel_type_mask =
- input_mask->tunnel_type_mask;
- switch (rte_be_to_cpu_32(input_mask->tunnel_id_mask)) {
+ switch (rte_be_to_cpu_32(info->mask.tunnel_id_mask)) {
case 0x0:
/* Mask vxlan id */
fdiripv6m |= IXGBE_FDIRIP6M_TNI_VNI;
@@ -466,8 +449,6 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
PMD_INIT_LOG(ERR, "invalid tunnel_id_mask");
return -EINVAL;
}
- info->mask.tunnel_id_mask =
- input_mask->tunnel_id_mask;
}
IXGBE_WRITE_REG(hw, IXGBE_FDIRIP6M, fdiripv6m);
@@ -481,22 +462,90 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev,
}
static int
-fdir_set_input_mask(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_masks *input_mask)
+ixgbe_fdir_store_input_mask_82599(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ uint16_t dst_ipv6m = 0;
+ uint16_t src_ipv6m = 0;
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.src_port_mask = input_mask->src_port_mask;
+ info->mask.dst_port_mask = input_mask->dst_port_mask;
+ info->mask.src_ipv4_mask = input_mask->ipv4_mask.src_ip;
+ info->mask.dst_ipv4_mask = input_mask->ipv4_mask.dst_ip;
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.src_ip, src_ipv6m);
+ IPV6_ADDR_TO_MASK(input_mask->ipv6_mask.dst_ip, dst_ipv6m);
+ info->mask.src_ipv6_mask = src_ipv6m;
+ info->mask.dst_ipv6_mask = dst_ipv6m;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask_x550(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ struct ixgbe_hw_fdir_info *info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+
+ memset(&info->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
+ info->mask.vlan_tci_mask = input_mask->vlan_tci_mask;
+ info->mask.mac_addr_byte_mask = input_mask->mac_addr_byte_mask;
+ info->mask.tunnel_type_mask = input_mask->tunnel_type_mask;
+ info->mask.tunnel_id_mask = input_mask->tunnel_id_mask;
+
+ return IXGBE_SUCCESS;
+}
+
+static int
+ixgbe_fdir_store_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (mode >= RTE_FDIR_MODE_SIGNATURE &&
+ mode <= RTE_FDIR_MODE_PERFECT)
+ return ixgbe_fdir_store_input_mask_82599(dev, input_mask);
+ else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+ mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ return ixgbe_fdir_store_input_mask_x550(dev, input_mask);
+
+ PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
+ return -ENOTSUP;
+}
+
+int
+ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
{
enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
if (mode >= RTE_FDIR_MODE_SIGNATURE &&
mode <= RTE_FDIR_MODE_PERFECT)
- return fdir_set_input_mask_82599(dev, input_mask);
+ return fdir_set_input_mask_82599(dev);
else if (mode >= RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
- return fdir_set_input_mask_x550(dev, input_mask);
+ return fdir_set_input_mask_x550(dev);
PMD_DRV_LOG(ERR, "Not supported fdir mode - %d!", mode);
return -ENOTSUP;
}
+static int
+fdir_set_input_mask(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_masks *input_mask)
+{
+ int ret;
+
+ ret = ixgbe_fdir_store_input_mask(dev, input_mask);
+ if (ret)
+ return ret;
+
+ return ixgbe_fdir_set_input_mask(dev);
+}
+
/*
* ixgbe_check_fdir_flex_conf - check if the flex payload and mask configuration
* arguments are valid
@@ -681,6 +730,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp4_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall-through */
/* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
@@ -696,6 +746,7 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
fdir_filter->input.flow.udp6_flow.src_port;
input->formatted.dst_port =
fdir_filter->input.flow.udp6_flow.dst_port;
+ /* fall-through */
/* for SCTP flow type, port and verify_tag are meaningless in ixgbe. */
case RTE_ETH_FLOW_NONFRAG_IPV6_SCTP:
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
@@ -1075,36 +1126,115 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash)
}
-/*
- * ixgbe_add_del_fdir_filter - add or remove a flow diretor filter.
- * @dev: pointer to the structure rte_eth_dev
- * @fdir_filter: fdir filter entry
- * @del: 1 - delete, 0 - add
- * @update: 1 - update
- */
+static inline struct ixgbe_fdir_filter *
+ixgbe_fdir_filter_lookup(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+
+ ret = rte_hash_lookup(fdir_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return fdir_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ struct ixgbe_fdir_filter *fdir_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(fdir_info->hash_handle,
+ &fdir_filter->ixgbe_fdir);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert fdir filter to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ fdir_info->hash_map[ret] = fdir_filter;
+
+ TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_fdir_filter(struct ixgbe_hw_fdir_info *fdir_info,
+ union ixgbe_atr_input *key)
+{
+ int ret;
+ struct ixgbe_fdir_filter *fdir_filter;
+
+ ret = rte_hash_del_key(fdir_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "No such fdir filter to delete %d!", ret);
+ return ret;
+ }
+
+ fdir_filter = fdir_info->hash_map[ret];
+ fdir_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
+ rte_free(fdir_filter);
+
+ return 0;
+}
+
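These lookup/insert/remove helpers assume that fdir_info->hash_handle and fdir_info->hash_map are set up elsewhere in the driver (outside this excerpt). A minimal sketch, under that assumption, of how such a table could be created with the rte_hash API, keyed on the packed union ixgbe_atr_input; the table name and the choice of rte_hash_crc here are illustrative, not necessarily what the patch does in ixgbe_ethdev.c:

#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_lcore.h>
/* The ixgbe driver headers providing union ixgbe_atr_input and
 * IXGBE_MAX_FDIR_FILTER_NUM are assumed to be included as well. */

static struct rte_hash *
example_create_fdir_hash(void)
{
	struct rte_hash_parameters params = {
		.name = "example_fdir_hash",		/* hypothetical name */
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,	/* same macro used below */
		.key_len = sizeof(union ixgbe_atr_input),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* The returned handle plays the role of fdir_info->hash_handle. */
	return rte_hash_create(&params);
}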
static int
-ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
- const struct rte_eth_fdir_filter *fdir_filter,
+ixgbe_interpret_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ struct ixgbe_fdir_rule *rule)
+{
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ int err;
+
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+
+ err = ixgbe_fdir_filter_to_atr_input(fdir_filter,
+ &rule->ixgbe_fdir,
+ fdir_mode);
+ if (err)
+ return err;
+
+ rule->mode = fdir_mode;
+ if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT)
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ rule->queue = fdir_filter->action.rx_queue;
+ rule->soft_id = fdir_filter->soft_id;
+
+ return 0;
+}
+
+int
+ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
bool del,
bool update)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t fdircmd_flags;
uint32_t fdirhash;
- union ixgbe_atr_input input;
uint8_t queue;
bool is_perfect = FALSE;
int err;
struct ixgbe_hw_fdir_info *info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+ struct ixgbe_fdir_filter *node;
+ bool add_node = FALSE;
- if (fdir_mode == RTE_FDIR_MODE_NONE)
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
return -ENOTSUP;
/*
* Sanity check for x550.
- * When adding a new filter with flow type set to IPv4-other,
+ * When adding a new filter with flow type set to IPv4,
* the flow director mask should be configured beforehand,
* and the L4 protocol and ports are masked.
*/
@@ -1112,12 +1242,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
(hw->mac.type == ixgbe_mac_X550 ||
hw->mac.type == ixgbe_mac_X550EM_x ||
hw->mac.type == ixgbe_mac_X550EM_a) &&
- (fdir_filter->input.flow_type ==
- RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) &&
+ (rule->ixgbe_fdir.formatted.flow_type ==
+ IXGBE_ATR_FLOW_TYPE_IPV4) &&
(info->mask.src_port_mask != 0 ||
info->mask.dst_port_mask != 0)) {
PMD_DRV_LOG(ERR, "By this device,"
- " IPv4-other is not supported without"
+ " IPv4 is not supported without"
" L4 protocol and ports masked!");
return -ENOTSUP;
}
@@ -1126,28 +1256,26 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
is_perfect = TRUE;
- memset(&input, 0, sizeof(input));
-
- err = ixgbe_fdir_filter_to_atr_input(fdir_filter, &input,
- fdir_mode);
- if (err)
- return err;
-
if (is_perfect) {
- if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+ if (rule->ixgbe_fdir.formatted.flow_type &
+ IXGBE_ATR_L4TYPE_IPV6_MASK) {
PMD_DRV_LOG(ERR, "IPv6 is not supported in"
" perfect mode!");
return -ENOTSUP;
}
- fdirhash = atr_compute_perfect_hash_82599(&input,
+ fdirhash = atr_compute_perfect_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
- fdirhash |= fdir_filter->soft_id <<
+ fdirhash |= rule->soft_id <<
IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
} else
- fdirhash = atr_compute_sig_hash_82599(&input,
+ fdirhash = atr_compute_sig_hash_82599(&rule->ixgbe_fdir,
dev->data->dev_conf.fdir_conf.pballoc);
if (del) {
+ err = ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ if (err < 0)
+ return err;
+
err = fdir_erase_filter_82599(hw, fdirhash);
if (err < 0)
PMD_DRV_LOG(ERR, "Fail to delete FDIR filter!");
@@ -1157,7 +1285,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
}
/* add or update an fdir filter*/
fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
- if (fdir_filter->action.behavior == RTE_ETH_FDIR_REJECT) {
+ if (rule->fdirflags & IXGBE_FDIRCMD_DROP) {
if (is_perfect) {
queue = dev->data->dev_conf.fdir_conf.drop_queue;
fdircmd_flags |= IXGBE_FDIRCMD_DROP;
@@ -1166,28 +1294,86 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
" signature mode.");
return -EINVAL;
}
- } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT &&
- fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM)
- queue = (uint8_t)fdir_filter->action.rx_queue;
+ } else if (rule->queue < IXGBE_MAX_RX_QUEUE_NUM)
+ queue = (uint8_t)rule->queue;
else
return -EINVAL;
+ node = ixgbe_fdir_filter_lookup(info, &rule->ixgbe_fdir);
+ if (node) {
+ if (update) {
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+ } else {
+ PMD_DRV_LOG(ERR, "Conflict with existing fdir filter!");
+ return -EINVAL;
+ }
+ } else {
+ add_node = TRUE;
+ node = rte_zmalloc("ixgbe_fdir",
+ sizeof(struct ixgbe_fdir_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+ (void)rte_memcpy(&node->ixgbe_fdir,
+ &rule->ixgbe_fdir,
+ sizeof(union ixgbe_atr_input));
+ node->fdirflags = fdircmd_flags;
+ node->fdirhash = fdirhash;
+ node->queue = queue;
+
+ err = ixgbe_insert_fdir_filter(info, node);
+ if (err < 0) {
+ rte_free(node);
+ return err;
+ }
+ }
+
if (is_perfect) {
- err = fdir_write_perfect_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash,
- fdir_mode);
+ err = fdir_write_perfect_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash, fdir_mode);
} else {
- err = fdir_add_signature_filter_82599(hw, &input, queue,
- fdircmd_flags, fdirhash);
+ err = fdir_add_signature_filter_82599(hw, &rule->ixgbe_fdir,
+ queue, fdircmd_flags,
+ fdirhash);
}
- if (err < 0)
+ if (err < 0) {
PMD_DRV_LOG(ERR, "Fail to add FDIR filter!");
- else
+
+ if (add_node)
+ (void)ixgbe_remove_fdir_filter(info, &rule->ixgbe_fdir);
+ } else {
PMD_DRV_LOG(DEBUG, "Success to add FDIR filter");
+ }
return err;
}
+/* ixgbe_add_del_fdir_filter - add or remove a flow director filter.
+ * @dev: pointer to the structure rte_eth_dev
+ * @fdir_filter: fdir filter entry
+ * @del: 1 - delete, 0 - add
+ * @update: 1 - update
+ */
+static int
+ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_eth_fdir_filter *fdir_filter,
+ bool del,
+ bool update)
+{
+ struct ixgbe_fdir_rule rule;
+ int err;
+
+ err = ixgbe_interpret_fdir_filter(dev, fdir_filter, &rule);
+
+ if (err)
+ return err;
+
+ return ixgbe_fdir_filter_program(dev, &rule, del, update);
+}
+
static int
ixgbe_fdir_flush(struct rte_eth_dev *dev)
{
@@ -1378,3 +1564,66 @@ ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
}
return ret;
}
+
+/* restore flow director filter */
+void
+ixgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *node;
+ bool is_perfect = FALSE;
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+ fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+ is_perfect = TRUE;
+
+ if (is_perfect) {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_write_perfect_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+ }
+ } else {
+ TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+ (void)fdir_add_signature_filter_82599(hw,
+ &node->ixgbe_fdir,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+ }
+ }
+}
+
+/* remove all the flow director filters */
+int
+ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_fdir_filter *fdir_filter;
+ struct ixgbe_fdir_filter *filter_flag;
+ int ret = 0;
+
+ /* flush flow director */
+ rte_hash_reset(fdir_info->hash_handle);
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct ixgbe_fdir_filter *) * IXGBE_MAX_FDIR_FILTER_NUM);
+ filter_flag = TAILQ_FIRST(&fdir_info->fdir_list);
+ while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+ TAILQ_REMOVE(&fdir_info->fdir_list,
+ fdir_filter,
+ entries);
+ rte_free(fdir_filter);
+ }
+
+ if (filter_flag != NULL)
+ ret = ixgbe_fdir_flush(dev);
+
+ return ret;
+}
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
new file mode 100644
index 00000000..da7b1cc8
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -0,0 +1,2778 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+#include <rte_random.h>
+#include <rte_dev.h>
+#include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "ixgbe_logs.h"
+#include "base/ixgbe_api.h"
+#include "base/ixgbe_vf.h"
+#include "base/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+#include "ixgbe_bypass.h"
+#include "ixgbe_rxtx.h"
+#include "base/ixgbe_type.h"
+#include "base/ixgbe_phy.h"
+#include "rte_pmd_ixgbe.h"
+
+
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
+#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
+ do { \
+ item = pattern + index;\
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+ index++; \
+ item = pattern + index; \
+ } \
+ } while (0)
+
+#define NEXT_ITEM_OF_ACTION(act, actions, index)\
+ do { \
+ act = actions + index; \
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+ index++; \
+ act = actions + index; \
+ } \
+ } while (0)
+
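As an aside, VOID items and actions are transparent to these macros, so a pattern that interleaves VOID entries is parsed exactly like one without them. A minimal illustration (not part of the patch):

/* Both arrays are seen as ETH -> IPV4 -> END once NEXT_ITEM_OF_PATTERN()
 * has skipped the VOID entry; spec/last/mask are left NULL for brevity. */
static const struct rte_flow_item example_pattern_with_void[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};

static const struct rte_flow_item example_pattern_without_void[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};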
+/**
+ * Please be aware there's an assumption for all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order.
+ * Because the pattern is used to describe packets,
+ * the packet fields should normally be in network order.
+ */
+
+/**
+ * Parse the rule to see if it is an n-tuple rule.
+ * And get the n-tuple filter info as well.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * next_proto_id 17 0xFF
+ * UDP/TCP/ src_port 80 0xFFFF
+ * SCTP dst_port 80 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item can be MAC or IPv4 */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+		/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+ /* if the first item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ /* check if the next not void item is IPv4 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+ }
+
+ /* get the IPv4 info */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ /**
+ * Only support src & dst addresses, protocol,
+ * others should be masked.
+ */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+ filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+ filter->proto_mask = ipv4_mask->hdr.next_proto_id;
+
+ ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ filter->dst_ip = ipv4_spec->hdr.dst_addr;
+ filter->src_ip = ipv4_spec->hdr.src_addr;
+ filter->proto = ipv4_spec->hdr.next_proto_id;
+
+ /* check if the next not void item is TCP or UDP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* get the TCP/UDP info */
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ntuple mask");
+ return -rte_errno;
+ }
+
+	/* Not supported last point for range */
+ if (item->last) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+ /**
+ * Only support src & dst ports, tcp flags,
+ * others should be masked.
+ */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = tcp_mask->hdr.dst_port;
+ filter->src_port_mask = tcp_mask->hdr.src_port;
+ if (tcp_mask->hdr.tcp_flags == 0xFF) {
+ filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else if (!tcp_mask->hdr.tcp_flags) {
+ filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ filter->dst_port = tcp_spec->hdr.dst_port;
+ filter->src_port = tcp_spec->hdr.src_port;
+ filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+ } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = udp_mask->hdr.dst_port;
+ filter->src_port_mask = udp_mask->hdr.src_port;
+
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ filter->dst_port = udp_spec->hdr.dst_port;
+ filter->src_port = udp_spec->hdr.src_port;
+ } else {
+ sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+
+ /**
+ * Only support src & dst ports,
+ * others should be masked.
+ */
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(filter, 0,
+ sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ filter->dst_port_mask = sctp_mask->hdr.dst_port;
+ filter->src_port_mask = sctp_mask->hdr.src_port;
+
+ sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ filter->dst_port = sctp_spec->hdr.dst_port;
+ filter->src_port = sctp_spec->hdr.src_port;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /**
+ * n-tuple only supports forwarding,
+ * check if the first not void action is QUEUE.
+ */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ item, "Not supported action.");
+ return -rte_errno;
+ }
+ filter->queue =
+ ((const struct rte_flow_action_queue *)act->conf)->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+ filter->priority = (uint16_t)attr->priority;
+ if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
+ filter->priority = 1;
+
+ return 0;
+}
+
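A minimal usage sketch (not part of the patch) of a pattern/action list that cons_parse_ntuple_filter() accepts, following the doc comment above; the addresses, ports, queue index and priority are hypothetical example values, and rte_flow_create() assumes a port that is already configured (its port_id type varies across DPDK releases):

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
example_ntuple_rule(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
		.src_addr = rte_cpu_to_be_32(0xC0A80114),	/* 192.168.1.20 */
		.dst_addr = rte_cpu_to_be_32(0xC0A70332),	/* 192.167.3.50 */
		.next_proto_id = 17,				/* UDP */
	} };
	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
		.src_addr = UINT32_MAX,
		.dst_addr = UINT32_MAX,
		.next_proto_id = 0xFF,
	} };
	struct rte_flow_item_udp udp_spec = { .hdr = {
		.src_port = rte_cpu_to_be_16(80),
		.dst_port = rte_cpu_to_be_16(80),
	} };
	struct rte_flow_item_udp udp_mask = { .hdr = {
		.src_port = UINT16_MAX,
		.dst_port = UINT16_MAX,
	} };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask stay NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err) ?
		0 : -rte_errno;
}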
+/* a function specific to ixgbe because the flags handling is device specific */
+static int
+ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ntuple_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ /* Ixgbe doesn't support tcp flags. */
+ if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ /* Ixgbe doesn't support many priorities. */
+ if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+ filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Priority not supported by ntuple filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -rte_errno;
+
+ /* fixed value for ixgbe */
+ filter->flags = RTE_5TUPLE_FLAGS;
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an ethertype rule.
+ * And get the ethertype filter info as well.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH type 0x0807 0xFFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item *pattern,
+ const struct rte_flow_action *actions,
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* Parse pattern */
+ index = 0;
+
+ /* The first non-void item should be MAC. */
+ item = pattern + index;
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ index++;
+ item = pattern + index;
+ }
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Mask bits of source MAC address must be full of 0.
+ * Mask bits of destination MAC address must be full
+ * of 1 or full of 0.
+ */
+ if (!is_zero_ether_addr(&eth_mask->src) ||
+ (!is_zero_ether_addr(&eth_mask->dst) &&
+ !is_broadcast_ether_addr(&eth_mask->dst))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ether address mask");
+ return -rte_errno;
+ }
+
+ if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid ethertype mask");
+ return -rte_errno;
+ }
+
+ /* If mask bits of destination MAC address
+ * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+ */
+ if (is_broadcast_ether_addr(&eth_mask->dst)) {
+ filter->mac_addr = eth_spec->dst;
+ filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+ } else {
+ filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+ }
+ filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ /* Check if the next non-void item is END. */
+ index++;
+ item = pattern + index;
+ while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
+ index++;
+ item = pattern + index;
+ }
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ethertype filter.");
+ return -rte_errno;
+ }
+
+ /* Parse action */
+
+ index = 0;
+ /* Check if the first non-void action is QUEUE or DROP. */
+ act = actions + index;
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ index++;
+ act = actions + index;
+ }
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ } else {
+ filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
+ }
+
+ /* Check if the next non-void item is END */
+ index++;
+ act = actions + index;
+ while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ index++;
+ act = actions + index;
+ }
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* Parse attr */
+ /* Must be input direction */
+ if (!attr->ingress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->egress) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->priority) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* Not supported */
+ if (attr->group) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ attr, "Not support group.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
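A minimal sketch (not part of the patch) of an ethertype rule that cons_parse_ethertype_filter() accepts, steering ARP (0x0806) frames to a queue; the ethertype and queue index are example values. Note that ixgbe_parse_ethertype_filter() below additionally rejects the MAC-match and DROP variants as well as the IPv4/IPv6 ethertypes:

static int
example_ethertype_rule(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };	/* group/priority must be 0 */
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0806),	/* ARP */
	};
	struct rte_flow_item_eth eth_mask = {
		.type = 0xFFFF,				/* match the full ethertype */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err) ?
		0 : -rte_errno;
}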
+static int
+ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_ethertype_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_ethertype_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+	/* Ixgbe doesn't support matching on MAC address. */
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "queue index much too big");
+ return -rte_errno;
+ }
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "IPv4/IPv6 not supported by ethertype filter");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "mac compare is unsupported");
+ return -rte_errno;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "drop option is unsupported");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * And get the TCP SYN filter info as well.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * TCP tcp_flags 0x02 0xFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_action *act;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /* parse pattern */
+ index = 0;
+
+ /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Skip Ethernet */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* if the item is MAC, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN address mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is IPv4 or IPv6 */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* if the item is IP, the content should be NULL */
+ if (item->spec || item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is TCP */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. Only support SYN. */
+ if (!item->spec || !item->mask) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid SYN mask");
+ return -rte_errno;
+ }
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_mask->hdr.src_port ||
+ tcp_mask->hdr.dst_port ||
+ tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by syn filter");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->queue = act_q->index;
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* Support 2 priorities, the lowest or highest. */
+ if (!attr->priority) {
+ filter->hig_pri = 0;
+ } else if (attr->priority == (uint32_t)~0U) {
+ filter->hig_pri = 1;
+ } else {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
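A minimal sketch (not part of the patch) of a TCP SYN rule that cons_parse_syn_filter() accepts. Per the checks above, the tcp_flags mask must be exactly the SYN bit (0x02) and every other TCP header mask field must be zero; the queue index is an example value:

static int
example_tcp_syn_rule(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };	/* priority 0 = low, ~0 = high */
	struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
	struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask stay NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask stay NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err) ?
		0 : -rte_errno;
}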
+static int
+ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_syn_filter *filter,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ ret = cons_parse_syn_filter(attr, pattern,
+ actions, filter, error);
+
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Parse the rule to see if it is an L2 tunnel rule.
+ * And get the L2 tunnel filter info as well.
+ * Only E-tag is supported now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEM Spec Mask
+ * E_TAG	grp		0x1	0x3
+ *		e_cid_base	0x309	0xFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *filter,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_e_tag *e_tag_spec;
+ const struct rte_flow_item_e_tag *e_tag_mask;
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ uint32_t index;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+ /* parse pattern */
+ index = 0;
+
+ /* The first not void item should be e-tag. */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ if (!item->spec || !item->mask) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
+ e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+
+ /* Only care about GRP and E cid base. */
+ if (e_tag_mask->epcp_edei_in_ecid_b ||
+ e_tag_mask->in_ecid_e ||
+ e_tag_mask->ecid_e ||
+ e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ /**
+ * grp and e_cid_base are bit fields and only use 14 bits.
+ * e-tag id is taken as little endian by HW.
+ */
+ filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ filter->pool = act_q->index;
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ret = cons_parse_l2_tn_filter(attr, pattern,
+ actions, l2_tn_filter, error);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Not supported by L2 tunnel filter");
+ return -rte_errno;
+ }
+
+ return ret;
+}
+
+/* Parse to get the attr and action info of flow director rule. */
+static int
+ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark;
+ uint32_t index;
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->priority) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Not support priority.");
+ return -rte_errno;
+ }
+
+ /* parse action */
+ index = 0;
+
+ /* check if the first not void action is QUEUE or DROP. */
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+ act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ act_q = (const struct rte_flow_action_queue *)act->conf;
+ rule->queue = act_q->index;
+ } else { /* drop */
+ rule->fdirflags = IXGBE_FDIRCMD_DROP;
+ }
+
+ /* check if the next not void item is MARK */
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
+ (act->type != RTE_FLOW_ACTION_TYPE_END)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rule->soft_id = 0;
+
+ if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ mark = (const struct rte_flow_action_mark *)act->conf;
+ rule->soft_id = mark->id;
+ index++;
+ NEXT_ITEM_OF_ACTION(act, actions, index);
+ }
+
+ /* check if the next not void item is END */
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
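A minimal sketch (not part of the patch) of an action list this helper accepts: QUEUE (or DROP), then an optional MARK whose id becomes the rule's soft_id, then END; the queue index and mark id are example values.

static const struct rte_flow_action_queue example_fdir_queue = { .index = 1 };
static const struct rte_flow_action_mark example_fdir_mark = { .id = 0x1234 };

/* QUEUE (or DROP), then an optional MARK, then END. */
static const struct rte_flow_action example_fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_fdir_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK,  .conf = &example_fdir_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};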
+/**
+ * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
+ * And get the flow director filter info as well.
+ * UDP/TCP/SCTP PATTERN:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP or SCTP.
+ * The next not void item must be END.
+ * MAC VLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be MAC VLAN.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * UDP/TCP/SCTP pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
+ * dst_addr 192.167.3.50 0xFFFFFFFF
+ * UDP/TCP/SCTP src_port 80 0xFFFF
+ * dst_port 80 0xFFFF
+ * END
+ * MAC VLAN pattern example:
+ * ITEM Spec Mask
+ * ETH		dst_addr
+ *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
+ *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * Other members in mask and spec should be set to 0x00.
+ * Item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec;
+ const struct rte_flow_item_ipv4 *ipv4_mask;
+ const struct rte_flow_item_tcp *tcp_spec;
+ const struct rte_flow_item_tcp *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec;
+ const struct rte_flow_item_udp *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec;
+ const struct rte_flow_item_sctp *sctp_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+
+ uint32_t index, j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+	 * Some fields may not be provided. Set spec to 0 and mask to default
+	 * value, so we need not do anything later for the fields that are
+	 * not provided.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /* parse pattern */
+ index = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or TCP or UDP or SCTP.
+ */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT;
+
+	/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Get the MAC info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+ if (item->spec && !item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+
+ if (item->mask) {
+			/* If the Ethernet item carries a mask, the rule is in MAC VLAN mode. */
+ rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
+
+ rule->b_mask = TRUE;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * src MAC address must be masked,
+ * and don't support dst MAC address mask.
+ */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j] ||
+ eth_mask->dst.addr_bytes[j] != 0xFF) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+			/* When there is no VLAN item, treat the VLAN TCI as fully masked. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+ }
+		/**
+		 * If both spec and mask are NULL, the ETH item is a
+		 * don't-care. Do nothing.
+		 */
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ } else {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+		/* Not supported last point for range */
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+ /* More than one tag is not supported. */
+
+ /**
+ * Check that the next not void item is END; a second VLAN is not allowed.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the IP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_IPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst addresses,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ ipv4_mask =
+ (const struct rte_flow_item_ipv4 *)item->mask;
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.type_of_service ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset ||
+ ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.next_proto_id ||
+ ipv4_mask->hdr.hdr_checksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
+ rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ ipv4_spec =
+ (const struct rte_flow_item_ipv4 *)item->spec;
+ rule->ixgbe_fdir.formatted.dst_ip[0] =
+ ipv4_spec->hdr.dst_addr;
+ rule->ixgbe_fdir.formatted.src_ip[0] =
+ ipv4_spec->hdr.src_addr;
+ }
+
+ /**
+ * Check if the next not void item is
+ * TCP or UDP or SCTP or END.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+ item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the TCP info. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_TCPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = tcp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ tcp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ tcp_spec->hdr.dst_port;
+ }
+ }
+
+ /* Get the UDP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_UDPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = udp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ udp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ udp_spec->hdr.dst_port;
+ }
+ }
+
+ /* Get the SCTP info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
+ /**
+ * Set the flow type even if there's no content
+ * as we must have a flow type.
+ */
+ rule->ixgbe_fdir.formatted.flow_type =
+ IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ /**
+ * Only care about src & dst ports,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ sctp_mask =
+ (const struct rte_flow_item_sctp *)item->mask;
+ if (sctp_mask->hdr.tag ||
+ sctp_mask->hdr.cksum) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ rule->mask.src_port_mask = sctp_mask->hdr.src_port;
+ rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ sctp_spec =
+ (const struct rte_flow_item_sctp *)item->spec;
+ rule->ixgbe_fdir.formatted.src_port =
+ sctp_spec->hdr.src_port;
+ rule->ixgbe_fdir.formatted.dst_port =
+ sctp_spec->hdr.dst_port;
+ }
+ }
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
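/*
 * A minimal application-side sketch of a rule the parser above accepts:
 * ETH (empty) / IPV4 / UDP with fully masked addresses and ports, steered
 * to a queue with a MARK id.  It assumes the port was configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT; the addresses, ports, queue
 * index and mark id are illustrative values, not taken from this patch.
 */
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
fdir_udp_rule_sketch(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80002), /* 192.168.0.2 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = 0xFFFFFFFF,
		.hdr.dst_addr = 0xFFFFFFFF,
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(1024),
		.hdr.dst_port = rte_cpu_to_be_16(4096),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = 0xFFFF,
		.hdr.dst_port = 0xFFFF,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validation exercises the parser above without touching the HW. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}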
+
+#define NVGRE_PROTOCOL 0x6558
+
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
+ * And get the flow director filter info as well.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be UDP.
+ * The fourth not void item must be VxLAN.
+ * The next not void items must be the inner MAC/VLAN, then END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void items must be the inner MAC/VLAN, then END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * UDP NULL NULL
+ * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * NVGRE pattern example:
+ * ITEM Spec Mask
+ * ETH NULL NULL
+ * IPV4/IPV6 NULL NULL
+ * NVGRE protocol 0x6558 0xFFFF
+ * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
+ * MAC VLAN tci 0x2016 0xEFFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *item;
+ const struct rte_flow_item_vxlan *vxlan_spec;
+ const struct rte_flow_item_vxlan *vxlan_mask;
+ const struct rte_flow_item_nvgre *nvgre_spec;
+ const struct rte_flow_item_nvgre *nvgre_mask;
+ const struct rte_flow_item_eth *eth_spec;
+ const struct rte_flow_item_eth *eth_mask;
+ const struct rte_flow_item_vlan *vlan_spec;
+ const struct rte_flow_item_vlan *vlan_mask;
+ uint32_t index, j;
+
+ if (!pattern) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+ NULL, "NULL pattern.");
+ return -rte_errno;
+ }
+
+ if (!actions) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+ NULL, "NULL action.");
+ return -rte_errno;
+ }
+
+ if (!attr) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, "NULL attribute.");
+ return -rte_errno;
+ }
+
+ /**
+ * Some fields may not be provided. Set the spec to 0 and the mask to the
+ * default value, so we need not do anything later for the fields that
+ * are not provided.
+ */
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
+ rule->mask.vlan_tci_mask = 0;
+
+ /* parse pattern */
+ index = 0;
+
+ /**
+ * The first not void item should be
+ * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
+ */
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+ item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
+
+ /* Skip MAC. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is IPv4 or IPv6. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip IP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is UDP or NVGRE. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+ item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Skip UDP. */
+ if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
+ /* Only used to describe the protocol stack. */
+ if (item->spec || item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ /* Check if the next not void item is VxLAN. */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* Get the VxLAN info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ RTE_FDIR_TUNNEL_TYPE_VXLAN;
+
+ /* Only care about VNI, others should be masked. */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ vxlan_mask =
+ (const struct rte_flow_item_vxlan *)item->mask;
+ if (vxlan_mask->flags) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* VNI must be either fully masked or fully unmasked. */
+ if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
+ vxlan_mask->vni[2]) &&
+ ((vxlan_mask->vni[0] != 0xFF) ||
+ (vxlan_mask->vni[1] != 0xFF) ||
+ (vxlan_mask->vni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
+ RTE_DIM(vxlan_mask->vni));
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ vxlan_spec = (const struct rte_flow_item_vxlan *)
+ item->spec;
+ rte_memcpy(((uint8_t *)
+ &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
+ rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
+ rule->ixgbe_fdir.formatted.tni_vni);
+ }
+ }
+
+ /* Get the NVGRE info */
+ if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
+ rule->ixgbe_fdir.formatted.tunnel_type =
+ RTE_FDIR_TUNNEL_TYPE_NVGRE;
+
+ /**
+ * Only care about the c_k_s_rsvd0_ver bits, protocol and TNI,
+ * others should be masked.
+ */
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+
+ /* Tunnel type is always meaningful. */
+ rule->mask.tunnel_type_mask = 1;
+
+ nvgre_mask =
+ (const struct rte_flow_item_nvgre *)item->mask;
+ if (nvgre_mask->flow_id) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ if (nvgre_mask->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x3000) ||
+ nvgre_mask->protocol != 0xFFFF) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* TNI must be either fully masked or fully unmasked. */
+ if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
+ nvgre_mask->tni[2]) &&
+ ((nvgre_mask->tni[0] != 0xFF) ||
+ (nvgre_mask->tni[1] != 0xFF) ||
+ (nvgre_mask->tni[2] != 0xFF))) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* tni is a 24-bit field */
+ rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
+ RTE_DIM(nvgre_mask->tni));
+ rule->mask.tunnel_id_mask <<= 8;
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ nvgre_spec =
+ (const struct rte_flow_item_nvgre *)item->spec;
+ if (nvgre_spec->c_k_s_rsvd0_ver !=
+ rte_cpu_to_be_16(0x2000) ||
+ nvgre_spec->protocol !=
+ rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /* tni is a 24-bit field */
+ rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
+ nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
+ rule->ixgbe_fdir.formatted.tni_vni <<= 8;
+ }
+ }
+
+ /* check if the next not void item is MAC */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /**
+ * Only support vlan and dst MAC address,
+ * others should be masked.
+ */
+
+ if (!item->mask) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+ rule->b_mask = TRUE;
+ eth_mask = (const struct rte_flow_item_eth *)item->mask;
+
+ /* Ether type should be masked. */
+ if (eth_mask->type) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ /* src MAC address should be masked. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ if (eth_mask->src.addr_bytes[j]) {
+ memset(rule, 0,
+ sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+ rule->mask.mac_addr_byte_mask = 0;
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ /* It's a per byte mask. */
+ if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+ rule->mask.mac_addr_byte_mask |= 0x1 << j;
+ } else if (eth_mask->dst.addr_bytes[j]) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /* When no vlan, considered as full mask. */
+ rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
+
+ if (item->spec) {
+ rule->b_spec = TRUE;
+ eth_spec = (const struct rte_flow_item_eth *)item->spec;
+
+ /* Get the dst MAC. */
+ for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ rule->ixgbe_fdir.formatted.inner_mac[j] =
+ eth_spec->dst.addr_bytes[j];
+ }
+ }
+
+ /**
+ * Check if the next not void item is vlan or ipv4.
+ * IPv6 is not supported.
+ */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+ if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
+ (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ /*Not supported last point for range*/
+ if (item->last) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ item, "Not supported last point for range");
+ return -rte_errno;
+ }
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!(item->spec && item->mask)) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+
+ vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
+ vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+
+ rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
+
+ rule->mask.vlan_tci_mask = vlan_mask->tci;
+ rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
+ /* More than one tag is not supported. */
+
+ /* check if the next not void item is END */
+ index++;
+ NEXT_ITEM_OF_PATTERN(item, pattern, index);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by fdir filter");
+ return -rte_errno;
+ }
+ }
+
+ /**
+ * If there is no VLAN item, it means we don't care about the VLAN.
+ * Do nothing.
+ */
+
+ return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
+}
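/*
 * A minimal application-side sketch of a VxLAN rule matching the pattern
 * documented above: outer ETH/IPV4/UDP left empty, VNI fully masked, then
 * the inner MAC and VLAN TCI.  It assumes the port was configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL; the VNI, MAC, TCI and
 * queue index are illustrative values, not taken from this patch.
 */
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
fdir_vxlan_rule_sketch(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
	struct rte_flow_item_eth inner_mac_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth inner_mac_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item_vlan vlan_spec = {
		.tci = rte_cpu_to_be_16(0x2016),
	};
	struct rte_flow_item_vlan vlan_mask = {
		.tci = rte_cpu_to_be_16(0xEFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_mac_spec, .mask = &inner_mac_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}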
+
+static int
+ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct ixgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540 &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a)
+ return -ENOTSUP;
+
+ ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
+ actions, rule, error);
+
+ if (!ret)
+ goto step_next;
+
+ ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
+ actions, rule, error);
+
+step_next:
+ if (fdir_mode == RTE_FDIR_MODE_NONE ||
+ fdir_mode != rule->mode)
+ return -ENOTSUP;
+ return ret;
+}
+
+void
+ixgbe_filterlist_flush(void)
+{
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr,
+ entries);
+ rte_free(ntuple_filter_ptr);
+ }
+
+ while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr,
+ entries);
+ rte_free(ethertype_filter_ptr);
+ }
+
+ while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ rte_free(syn_filter_ptr);
+ }
+
+ while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr,
+ entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+
+ while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr,
+ entries);
+ rte_free(fdir_rule_ptr);
+ }
+
+ while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr,
+ entries);
+ rte_free(ixgbe_flow_mem_ptr->flow);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+}
+
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one kind of filter.
+ * We will let it use the first filter it hits.
+ * So, the parsing sequence matters.
+ */
+static struct rte_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_hw_fdir_info *fdir_info =
+ IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct rte_flow *flow = NULL;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
+ if (!flow) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return NULL;
+ }
+ ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
+ sizeof(struct ixgbe_flow_mem), 0);
+ if (!ixgbe_flow_mem_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ rte_free(flow);
+ return NULL;
+ }
+ ixgbe_flow_mem_ptr->flow = flow;
+ TAILQ_INSERT_TAIL(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+ if (!ret) {
+ ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+ sizeof(struct ixgbe_ntuple_filter_ele), 0);
+ (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+ &ntuple_filter,
+ sizeof(struct rte_eth_ntuple_filter));
+ TAILQ_INSERT_TAIL(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ flow->rule = ntuple_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret) {
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, TRUE);
+ if (!ret) {
+ ethertype_filter_ptr = rte_zmalloc(
+ "ixgbe_ethertype_filter",
+ sizeof(struct ixgbe_ethertype_filter_ele), 0);
+ (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+ &ethertype_filter,
+ sizeof(struct rte_eth_ethertype_filter));
+ TAILQ_INSERT_TAIL(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ flow->rule = ethertype_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret) {
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+ if (!ret) {
+ syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+ sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+ (void)rte_memcpy(&syn_filter_ptr->filter_info,
+ &syn_filter,
+ sizeof(struct rte_eth_syn_filter));
+ TAILQ_INSERT_TAIL(&filter_syn_list,
+ syn_filter_ptr,
+ entries);
+ flow->rule = syn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_SYN;
+ return flow;
+ }
+ goto out;
+ }
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret) {
+ /* A mask cannot be deleted. */
+ if (fdir_rule.b_mask) {
+ if (!fdir_info->mask_added) {
+ /* It's the first time the mask is set. */
+ rte_memcpy(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ ret = ixgbe_fdir_set_input_mask(dev);
+ if (ret)
+ goto out;
+
+ fdir_info->mask_added = TRUE;
+ } else {
+ /**
+ * Only support one global mask,
+ * all the masks should be the same.
+ */
+ ret = memcmp(&fdir_info->mask,
+ &fdir_rule.mask,
+ sizeof(struct ixgbe_hw_fdir_mask));
+ if (ret)
+ goto out;
+ }
+ }
+
+ if (fdir_rule.b_spec) {
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
+ FALSE, FALSE);
+ if (!ret) {
+ fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
+ sizeof(struct ixgbe_fdir_rule_ele), 0);
+ (void)rte_memcpy(&fdir_rule_ptr->filter_info,
+ &fdir_rule,
+ sizeof(struct ixgbe_fdir_rule));
+ TAILQ_INSERT_TAIL(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ flow->rule = fdir_rule_ptr;
+ flow->filter_type = RTE_ETH_FILTER_FDIR;
+
+ return flow;
+ }
+
+ if (ret)
+ goto out;
+ }
+
+ goto out;
+ }
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+ if (!ret) {
+ ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+ if (!ret) {
+ l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
+ sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
+ (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+ &l2_tn_filter,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ flow->rule = l2_tn_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+ return flow;
+ }
+ }
+
+out:
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to create flow.");
+ rte_free(ixgbe_flow_mem_ptr);
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format. It doesn't guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
+ */
+static int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ int ret;
+
+ memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
+ actions, &ntuple_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
+ actions, &ethertype_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_parse_syn_filter(dev, attr, pattern,
+ actions, &syn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
+ actions, &fdir_rule, error);
+ if (!ret)
+ return 0;
+
+ memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
+ actions, &l2_tn_filter, error);
+
+ return ret;
+}
+
+/* Destroy a flow rule on ixgbe. */
+static int
+ixgbe_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_flow *pmd_flow = flow;
+ enum rte_filter_type filter_type = pmd_flow->filter_type;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ struct rte_eth_ethertype_filter ethertype_filter;
+ struct rte_eth_syn_filter syn_filter;
+ struct ixgbe_fdir_rule fdir_rule;
+ struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+ struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+ struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+ struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+ struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ntuple_filter,
+ &ntuple_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ntuple_filter));
+ ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ntuple_list,
+ ntuple_filter_ptr, entries);
+ rte_free(ntuple_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&ethertype_filter,
+ &ethertype_filter_ptr->filter_info,
+ sizeof(struct rte_eth_ethertype_filter));
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ &ethertype_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_ethertype_list,
+ ethertype_filter_ptr, entries);
+ rte_free(ethertype_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_SYN:
+ syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&syn_filter,
+ &syn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_syn_filter));
+ ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_syn_list,
+ syn_filter_ptr, entries);
+ rte_free(syn_filter_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
+ (void)rte_memcpy(&fdir_rule,
+ &fdir_rule_ptr->filter_info,
+ sizeof(struct ixgbe_fdir_rule));
+ ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_fdir_list,
+ fdir_rule_ptr, entries);
+ rte_free(fdir_rule_ptr);
+ }
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
+ pmd_flow->rule;
+ (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+ sizeof(struct rte_eth_l2_tunnel_conf));
+ ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_l2_tunnel_list,
+ l2_tn_filter_ptr, entries);
+ rte_free(l2_tn_filter_ptr);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to destroy flow");
+ return ret;
+ }
+
+ TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
+ if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
+ TAILQ_REMOVE(&ixgbe_flow_list,
+ ixgbe_flow_mem_ptr, entries);
+ rte_free(ixgbe_flow_mem_ptr);
+ }
+ }
+ rte_free(flow);
+
+ return ret;
+}
+
+/* Destroy all flow rules associated with a port on ixgbe. */
+static int
+ixgbe_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ixgbe_clear_all_ntuple_filter(dev);
+ ixgbe_clear_all_ethertype_filter(dev);
+ ixgbe_clear_syn_filter(dev);
+
+ ret = ixgbe_clear_all_fdir_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ret = ixgbe_clear_all_l2_tn_filter(dev);
+ if (ret < 0) {
+ rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to flush rule");
+ return ret;
+ }
+
+ ixgbe_filterlist_flush();
+
+ return 0;
+}
+
+const struct rte_flow_ops ixgbe_flow_ops = {
+ .validate = ixgbe_flow_validate,
+ .create = ixgbe_flow_create,
+ .destroy = ixgbe_flow_destroy,
+ .flush = ixgbe_flow_flush,
+};
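/*
 * This table is reached through the generic rte_flow API: the ethdev layer
 * asks the driver for its rte_flow_ops via the filter_ctrl dev_op with the
 * RTE_ETH_FILTER_GENERIC type.  A minimal sketch of such a hook is shown
 * below under that assumption; the function name is hypothetical and the
 * other filter types a real driver handles are elided.
 */
#include <errno.h>
#include <rte_ethdev.h>
#include <rte_flow_driver.h>

static int
flow_ops_get_sketch(struct rte_eth_dev *dev __rte_unused,
		    enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	if (filter_type != RTE_ETH_FILTER_GENERIC)
		return -ENOTSUP;
	if (filter_op != RTE_ETH_FILTER_GET)
		return -EINVAL;
	/* Hand the ops table above back to the rte_flow layer. */
	*(const struct rte_flow_ops **)arg = &ixgbe_flow_ops;
	return 0;
}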
diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 26395e41..d88832e5 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -61,7 +61,9 @@
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
- return eth_dev->pci_dev->max_vfs;
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(eth_dev);
+
+ return pci_dev->max_vfs;
}
static inline
@@ -176,6 +178,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
IXGBE_DEV_PRIVATE_TO_FILTER_INFO(eth_dev->data->dev_private);
uint16_t vf_num;
int i;
+ struct ixgbe_ethertype_filter ethertype_filter;
if (!hw->mac.ops.set_ethertype_anti_spoofing) {
RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
@@ -183,16 +186,23 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
return;
}
- /* occupy an entity of ether type filter */
- for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
- if (!(filter_info->ethertype_mask & (1 << i))) {
- filter_info->ethertype_mask |= 1 << i;
- filter_info->ethertype_filters[i] =
- IXGBE_ETHERTYPE_FLOW_CTRL;
- break;
- }
+ i = ixgbe_ethertype_filter_lookup(filter_info,
+ IXGBE_ETHERTYPE_FLOW_CTRL);
+ if (i >= 0) {
+ RTE_LOG(ERR, PMD, "A ether type filter"
+ " entity for flow control already exists!\n");
+ return;
}
- if (i == IXGBE_MAX_ETQF_FILTERS) {
+
+ ethertype_filter.ethertype = IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqf = IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETHERTYPE_FLOW_CTRL;
+ ethertype_filter.etqs = 0;
+ ethertype_filter.conf = TRUE;
+ i = ixgbe_ethertype_filter_insert(filter_info,
+ &ethertype_filter);
+ if (i < 0) {
RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
" entity for flow control.\n");
return;
@@ -387,15 +397,27 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
uint32_t reg_offset, vf_shift;
const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
+ uint8_t nb_q_per_pool;
+ int i;
vf_shift = vf & VFRE_MASK;
reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable all queue drop for IOV */
+ nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ for (i = vf * nb_q_per_pool; i < (vf + 1) * nb_q_per_pool; i++) {
+ IXGBE_WRITE_FLUSH(hw);
+ reg = IXGBE_QDE_ENABLE | IXGBE_QDE_WRITE;
+ reg |= i << IXGBE_QDE_IDX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg);
+ }
+
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
reg |= (reg | (1 << vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
diff --git a/drivers/net/ixgbe/ixgbe_regs.h b/drivers/net/ixgbe/ixgbe_regs.h
index 773e1693..2aa48201 100644
--- a/drivers/net/ixgbe/ixgbe_regs.h
+++ b/drivers/net/ixgbe/ixgbe_regs.h
@@ -41,7 +41,7 @@ struct reg_info {
uint32_t count;
uint32_t stride;
const char *name;
-} reg_info;
+};
static const struct reg_info ixgbe_regs_general[] = {
{IXGBE_CTRL, 1, 1, "IXGBE_CTRL"},
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index c61ce470..1e078959 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -70,6 +70,7 @@
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
+#include <rte_net.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
@@ -79,13 +80,23 @@
#include "base/ixgbe_common.h"
#include "ixgbe_rxtx.h"
+#ifdef RTE_LIBRTE_IEEE1588
+#define IXGBE_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IXGBE_TX_IEEE1588_TMST 0
+#endif
/* Bit Mask to indicate what bits required for building TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG | \
- PKT_TX_OUTER_IP_CKSUM)
+ PKT_TX_MACSEC | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ IXGBE_TX_IEEE1588_TMST)
+
+#define IXGBE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IXGBE_TX_OFFLOAD_MASK)
#if 1
#define RTE_PMD_USE_PREFETCH
@@ -100,6 +111,11 @@
#define rte_ixgbe_prefetch(p) do {} while (0)
#endif
+#ifdef RTE_IXGBE_INC_VECTOR
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+#endif
+
/*********************************************************************
*
* TX functions
@@ -131,7 +147,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
/* free buffers one at a time */
- m = __rte_pktmbuf_prefree_seg(txep->mbuf);
+ m = rte_pktmbuf_prefree_seg(txep->mbuf);
txep->mbuf = NULL;
if (unlikely(m == NULL))
@@ -321,7 +337,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, txq->tx_tail);
return nb_pkts;
}
@@ -352,6 +368,30 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
+#ifdef RTE_IXGBE_INC_VECTOR
+static uint16_t
+ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t nb_tx = 0;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ while (nb_pkts) {
+ uint16_t ret, num;
+
+ num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+ ret = ixgbe_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
+ num);
+ nb_tx += ret;
+ nb_pkts -= ret;
+ if (ret < num)
+ break;
+ }
+
+ return nb_tx;
+}
+#endif
+
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
@@ -519,6 +559,8 @@ tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
cmdtype |= IXGBE_ADVTXD_DCMD_TSE;
if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
cmdtype |= (1 << IXGBE_ADVTXD_OUTERIPCS_SHIFT);
+ if (ol_flags & PKT_TX_MACSEC)
+ cmdtype |= IXGBE_ADVTXD_MAC_LINKSEC;
return cmdtype;
}
@@ -897,7 +939,7 @@ end_of_tx:
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
@@ -905,6 +947,57 @@ end_of_tx:
/*********************************************************************
*
+ * TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ int i, ret;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /**
+ * Check if packet meets requirements for number of segments
+ *
+ * NOTE: for ixgbe it's always (40 - WTHRESH) for both TSO and
+ * non-TSO
+ */
+
+ if (m->nb_segs > IXGBE_TX_MAX_SEG - txq->wthresh) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (ol_flags & IXGBE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
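/*
 * ixgbe_prep_pkts() is hooked up as dev->tx_pkt_prepare in
 * ixgbe_set_tx_function() below, so applications reach it through
 * rte_eth_tx_prepare().  A minimal sketch of the usual call sequence,
 * assuming port_id/queue_id identify a configured TX queue; the helper
 * name and the drop policy are illustrative only.
 */
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t
tx_prepare_and_send_sketch(uint8_t port_id, uint16_t queue_id,
			   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t i;
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep != nb_pkts) {
		/* pkts[nb_prep] failed a check in ixgbe_prep_pkts();
		 * rte_errno holds the reason.  Drop it and everything
		 * behind it in this simplistic sketch. */
		for (i = nb_prep; i < nb_pkts; i++)
			rte_pktmbuf_free(pkts[i]);
	}

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}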
+
+/*********************************************************************
+ *
* RX functions
*
**********************************************************************/
@@ -1492,8 +1585,6 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq, bool reset_mbuf)
/* populate the static rte mbuf fields */
mb = rxep[i].mbuf;
if (reset_mbuf) {
- mb->next = NULL;
- mb->nb_segs = 1;
mb->port = rxq->port_id;
}
@@ -1583,7 +1674,8 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
/* update tail pointer */
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, cur_free_trigger);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ cur_free_trigger);
}
if (rxq->rx_tail >= rxq->nb_rx_desc)
@@ -1987,8 +2079,8 @@ next_desc:
if (!ixgbe_rx_alloc_bufs(rxq, false)) {
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr,
- next_rdt);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr,
+ next_rdt);
nb_hold -= rxq->rx_free_thresh;
} else {
PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
@@ -2100,12 +2192,6 @@ next_desc:
goto next_desc;
}
- /*
- * This is the last buffer of the received packet - return
- * the current cluster to the user.
- */
- rxm->next = NULL;
-
/* Initialize the first mbuf of the returned packet */
ixgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
@@ -2159,7 +2245,7 @@ next_desc:
rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
rte_wmb();
- IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, prev_id);
+ IXGBE_PCI_REG_WRITE_RELAXED(rxq->rdt_reg_addr, prev_id);
nb_hold = 0;
}
@@ -2284,6 +2370,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
if (((txq->txq_flags & IXGBE_SIMPLE_FLAGS) == IXGBE_SIMPLE_FLAGS)
&& (txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
+ dev->tx_pkt_prepare = NULL;
#ifdef RTE_IXGBE_INC_VECTOR
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
@@ -2304,6 +2391,7 @@ ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
(unsigned long)txq->tx_rs_thresh,
(unsigned long)RTE_PMD_IXGBE_TX_MAX_BURST);
dev->tx_pkt_burst = ixgbe_xmit_pkts;
+ dev->tx_pkt_prepare = ixgbe_prep_pkts;
}
}
@@ -2587,7 +2675,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
* rxq->rx_free_thresh >= RTE_PMD_IXGBE_RX_MAX_BURST
* rxq->rx_free_thresh < rxq->nb_rx_desc
* (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
- * rxq->nb_rx_desc<(IXGBE_MAX_RING_DESC-RTE_PMD_IXGBE_RX_MAX_BURST)
* Scattered packets are not supported. This should be checked
* outside of this function.
*/
@@ -2609,15 +2696,6 @@ check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
"rxq->rx_free_thresh=%d",
rxq->nb_rx_desc, rxq->rx_free_thresh);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc <
- (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST))) {
- PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
- "rxq->nb_rx_desc=%d, "
- "IXGBE_MAX_RING_DESC=%d, "
- "RTE_PMD_IXGBE_RX_MAX_BURST=%d",
- rxq->nb_rx_desc, IXGBE_MAX_RING_DESC,
- RTE_PMD_IXGBE_RX_MAX_BURST);
- ret = -EINVAL;
}
return ret;
@@ -2634,12 +2712,7 @@ ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
/*
* By default, the Rx queue setup function allocates enough memory for
* IXGBE_MAX_RING_DESC. The Rx Burst bulk allocation function requires
- * extra memory at the end of the descriptor ring to be zero'd out. A
- * pre-condition for using the Rx burst bulk alloc function is that the
- * number of descriptors is less than or equal to
- * (IXGBE_MAX_RING_DESC - RTE_PMD_IXGBE_RX_MAX_BURST). Check all the
- * constraints here to see if we need to zero out memory after the end
- * of the H/W descriptor ring.
+ * extra memory at the end of the descriptor ring to be zero'd out.
*/
if (adapter->rx_bulk_alloc_allowed)
/* zero out extra memory */
@@ -2859,11 +2932,6 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
struct ixgbe_rx_queue *rxq;
uint32_t desc = 0;
- if (rx_queue_id >= dev->data->nb_rx_queues) {
- PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
- return 0;
- }
-
rxq = dev->data->rx_queues[rx_queue_id];
rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -2898,6 +2966,63 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
}
+int
+ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct ixgbe_rx_queue *rxq = rx_queue;
+ volatile uint32_t *status;
+ uint32_t nb_hold, desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
+#ifdef RTE_IXGBE_INC_VECTOR
+ if (rxq->rx_using_sse)
+ nb_hold = rxq->rxrearm_nb;
+ else
+#endif
+ nb_hold = rxq->nb_rx_hold;
+ if (offset >= rxq->nb_rx_desc - nb_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].wb.upper.status_error;
+ if (*status & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct ixgbe_tx_queue *txq = tx_queue;
+ volatile uint32_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
+ status = &txq->tx_ring[desc].wb.status;
+ if (*status & rte_cpu_to_le_32(IXGBE_ADVTXD_STAT_DD))
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
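/*
 * These handlers back the generic rte_eth_rx_descriptor_status() and
 * rte_eth_tx_descriptor_status() calls.  A minimal sketch of polling them,
 * assuming port_id/queue_id identify configured queues whose rings hold
 * more than 256 descriptors; the 256 threshold is an illustrative value.
 */
#include <rte_ethdev.h>

static int
rx_backlog_high_sketch(uint8_t port_id, uint16_t queue_id)
{
	/* If the descriptor 256 slots ahead of the software tail has already
	 * been written back, at least that many packets are waiting. */
	return rte_eth_rx_descriptor_status(port_id, queue_id, 256) ==
		RTE_ETH_RX_DESC_DONE;
}

static int
tx_ring_full_sketch(uint8_t port_id, uint16_t queue_id)
{
	/* A descriptor well ahead of the TX tail that is still owned by the
	 * NIC means the queue has little room left. */
	return rte_eth_tx_descriptor_status(port_id, queue_id, 256) ==
		RTE_ETH_TX_DESC_FULL;
}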
+
void __attribute__((cold))
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
@@ -3323,7 +3448,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
- uint32_t q;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
@@ -3343,18 +3467,6 @@ ixgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
reg |= IXGBE_MTQC_VT_ENA;
IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
- /* Disable drop for all queues in VMDQ mode*/
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));
- } else {
- /* Enable drop for all queues in SRIOV mode */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT) | IXGBE_QDE_ENABLE));
- }
-
/* Enable the Tx desc arbiter */
reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
reg &= ~IXGBE_RTTDCS_ARBDIS;
@@ -3488,16 +3600,18 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev,
/**
* ixgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
- * @hw: pointer to hardware structure
+ * @dev: pointer to eth_dev structure
* @dcb_config: pointer to ixgbe_dcb_config structure
*/
static void
-ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
+ struct ixgbe_dcb_config *dcb_config)
{
uint32_t reg;
uint32_t vlanctrl;
uint8_t i;
+ uint32_t q;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
PMD_INIT_FUNC_TRACE();
/*
@@ -3535,6 +3649,21 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw,
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
+
+ if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
+ /* Disable drop for all queues in VMDQ mode*/
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT)));
+ } else {
+ /* Enable drop for all queues in SRIOV mode */
+ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++)
+ IXGBE_WRITE_REG(hw, IXGBE_QDE,
+ (IXGBE_QDE_WRITE |
+ (q << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
+ }
}
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
@@ -3625,6 +3754,8 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
uint32_t max_frame = dev->data->mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_bw_conf *bw_conf =
+ IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
@@ -3647,7 +3778,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Get dcb TX configuration parameters from rte_eth_conf */
ixgbe_dcb_rx_config(dev, dcb_config);
/*Configure general DCB RX parameters*/
- ixgbe_dcb_rx_hw_config(hw, dcb_config);
+ ixgbe_dcb_rx_hw_config(dev, dcb_config);
break;
default:
PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
@@ -3696,8 +3827,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
/* Re-configure 4 TCs BW */
for (i = 0; i < nb_tcs; i++) {
tc = &dcb_config->tc_config[i];
- tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
- (uint8_t)(100 / nb_tcs);
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs);
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
(uint8_t)(100 / nb_tcs);
}
@@ -3706,6 +3838,16 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev,
tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = 0;
}
+ } else {
+ /* Re-configure 8 TCs BW */
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ if (bw_conf->tc_num != nb_tcs)
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent =
+ (uint8_t)(100 / nb_tcs + (i & 1));
+ }
}
switch (hw->mac.type) {
@@ -4083,9 +4225,8 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
break;
}
} else {
- /*
- * SRIOV active scheme
- * Support RSS together with VMDq & SRIOV
+ /* SRIOV active scheme
+ * Support RSS together with SRIOV.
*/
switch (dev->data->dev_conf.rxmode.mq_mode) {
case ETH_MQ_RX_RSS:
@@ -4093,10 +4234,13 @@ ixgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
ixgbe_config_vf_rss(dev);
break;
case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_DCB:
+ /* In SRIOV, the configuration is the same as VMDq case */
ixgbe_vmdq_dcb_configure(dev);
break;
- /* FIXME if support DCB/RSS together with VMDq & SRIOV */
+ /* DCB/RSS together with SRIOV is not supported */
case ETH_MQ_RX_VMDQ_DCB_RSS:
+ case ETH_MQ_RX_DCB_RSS:
PMD_INIT_LOG(ERR,
"Could not support DCB/RSS with VMDq & SRIOV");
return -1;
@@ -4378,6 +4522,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
bool rsc_capable = false;
uint16_t i;
uint32_t rdrxctl;
+ uint32_t rfctl;
/* Sanity check */
dev->dev_ops->dev_infos_get(dev, &dev_info);
@@ -4405,22 +4550,18 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
}
/* RFCTL configuration */
- if (rsc_capable) {
- uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
-
- if (rx_conf->enable_lro)
- /*
- * Since NFS packets coalescing is not supported - clear
- * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
- * enabled.
- */
- rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
- IXGBE_RFCTL_NFSR_DIS);
- else
- rfctl |= IXGBE_RFCTL_RSC_DIS;
-
- IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
- }
+ rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
+ if ((rsc_capable) && (rx_conf->enable_lro))
+ /*
+ * Since NFS packets coalescing is not supported - clear
+ * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
+ * enabled.
+ */
+ rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
+ IXGBE_RFCTL_NFSR_DIS);
+ else
+ rfctl |= IXGBE_RFCTL_RSC_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
/* If LRO hasn't been requested - we are done here. */
if (!rx_conf->enable_lro)
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index 2608b364..1ffab4cc 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -67,7 +67,7 @@
#define RTE_IXGBE_MAX_RX_BURST RTE_IXGBE_RXQ_REARM_THRESH
#endif
-#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_IXGBE_DESCS_PER_LOOP - 1) * \
+#define RX_RING_SZ ((IXGBE_MAX_RING_DESC + RTE_PMD_IXGBE_RX_MAX_BURST) * \
sizeof(union ixgbe_adv_rx_desc))
#ifdef RTE_PMD_PACKET_PREFETCH
@@ -80,6 +80,8 @@
#define RTE_IXGBE_WAIT_100_US 100
#define RTE_IXGBE_VMTXSW_REGISTER_COUNT 2
+#define IXGBE_TX_MAX_SEG 40
+
#define IXGBE_PACKET_TYPE_MASK_82599 0X7F
#define IXGBE_PACKET_TYPE_MASK_X550 0X10FF
#define IXGBE_PACKET_TYPE_MASK_TUNNEL 0XFF
@@ -309,8 +311,8 @@ void ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq);
#ifdef RTE_IXGBE_INC_VECTOR
-uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+uint16_t ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
int ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq);
#endif /* RTE_IXGBE_INC_VECTOR */
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
index a3473b98..1c34bb5f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h
@@ -123,12 +123,12 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
* tx_next_dd - (tx_rs_thresh-1)
*/
txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
- m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[0].mbuf);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -143,7 +143,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
} else {
for (i = 1; i < n; i++) {
- m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+ m = rte_pktmbuf_prefree_seg(txep[i].mbuf);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
@@ -310,13 +310,6 @@ ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev)
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
-#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
- if (rxmode->hw_vlan_strip != 0 ||
- rxmode->hw_vlan_extend != 0)
- return -1;
-#endif
-
/* no fdir support */
if (fconf->mode != RTE_FDIR_MODE_NONE)
return -1;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
index e2715cb9..44de1caa 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c
@@ -85,9 +85,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
/*
* Flush mbuf with pkt template.
* Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
*/
vst1_u8((uint8_t *)&mb0->rearm_data, p);
paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM;
@@ -114,14 +111,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
#define VTAG_SHIFT (3)
static inline void
@@ -170,9 +159,6 @@ desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2,
rx_pkts[2]->ol_flags = vol.e[2];
rx_pkts[3]->ol_flags = vol.e[3];
}
-#else
-#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts)
-#endif
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -330,12 +316,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
*(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK;
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
rte_prefetch_non_temporal(rxdp + RTE_IXGBE_DESCS_PER_LOOP);
@@ -451,8 +431,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
index abbf2841..a7bc199f 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c
@@ -82,23 +82,10 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
/* Initialize the mbufs in vector, process 2 mbufs in one loop */
for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) {
__m128i vaddr0, vaddr1;
- uintptr_t p0, p1;
mb0 = rxep[0].mbuf;
mb1 = rxep[1].mbuf;
- /*
- * Flush mbuf with pkt template.
- * Data to be rearmed is 6 bytes long.
- * Though, RX will overwrite ol_flags that are coming next
- * anyway. So overwrite whole 8 bytes with one load:
- * 6 bytes of rearm_data plus first 2 bytes of ol_flags.
- */
- p0 = (uintptr_t)&mb0->rearm_data;
- *(uint64_t *)p0 = rxq->mbuf_initializer;
- p1 = (uintptr_t)&mb1->rearm_data;
- *(uint64_t *)p1 = rxq->mbuf_initializer;
-
/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
vaddr0 = _mm_loadu_si128((__m128i *)&(mb0->buf_addr));
vaddr1 = _mm_loadu_si128((__m128i *)&(mb1->buf_addr));
@@ -133,23 +120,12 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
}
-/* Handling the offload flags (olflags) field takes computation
- * time when receiving packets. Therefore we provide a flag to disable
- * the processing of the olflags field when they are not needed. This
- * gives improved performance, at the cost of losing the offload info
- * in the received packet
- */
-#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
-
static inline void
-desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
+desc_to_olflags_v(__m128i descs[4], __m128i mbuf_init, uint8_t vlan_flags,
struct rte_mbuf **rx_pkts)
{
__m128i ptype0, ptype1, vtag0, vtag1, csum;
- union {
- uint16_t e[4];
- uint64_t dword;
- } vol;
+ __m128i rearm0, rearm1, rearm2, rearm3;
/* mask everything except rss type */
const __m128i rsstype_msk = _mm_set_epi16(
@@ -228,18 +204,41 @@ desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags,
vtag1 = _mm_or_si128(vtag0, vtag1);
vtag1 = _mm_or_si128(ptype0, vtag1);
- vol.dword = _mm_cvtsi128_si64(vtag1);
- rx_pkts[0]->ol_flags = vol.e[0];
- rx_pkts[1]->ol_flags = vol.e[1];
- rx_pkts[2]->ol_flags = vol.e[2];
- rx_pkts[3]->ol_flags = vol.e[3];
-}
+ /*
+ * At this point, we have the 4 sets of flags in the low 64-bits
+ * of vtag1 (4x16).
+ * We want to extract these, and merge them with the mbuf init data
+ * so we can do a single 16-byte write to the mbuf to set the flags
+ * and all the other initialization fields. Extracting the
+ * appropriate flags means that we have to do a shift and blend for
+ * each mbuf before we do the write.
+ */
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 8), 0x10);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 6), 0x10);
+ rearm2 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 4), 0x10);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(vtag1, 2), 0x10);
+
#else
-#define desc_to_olflags_v(desc, vlan_flags, rx_pkts) do { \
- RTE_SET_USED(vlan_flags); \
- } while (0)
-#endif
+ rearm0 = _mm_slli_si128(vtag1, 14);
+ rearm1 = _mm_slli_si128(vtag1, 12);
+ rearm2 = _mm_slli_si128(vtag1, 10);
+ rearm3 = _mm_slli_si128(vtag1, 8);
+
+ rearm0 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm0, 48));
+ rearm1 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm1, 48));
+ rearm2 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm2, 48));
+ rearm3 = _mm_or_si128(mbuf_init, _mm_srli_epi64(rearm3, 48));
+
+#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
+
+ _mm_store_si128((__m128i *)&rx_pkts[0]->rearm_data, rearm0);
+ _mm_store_si128((__m128i *)&rx_pkts[1]->rearm_data, rearm1);
+ _mm_store_si128((__m128i *)&rx_pkts[2]->rearm_data, rearm2);
+ _mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
+}
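
For reference, a scalar sketch (not part of the patch) of what the shift/blend sequence above does for one packet; flags16[k] is a hypothetical name for the 16-bit flag word of packet k extracted from vtag1:

/* hypothetical scalar equivalent of the vector rearm write */
uint64_t rearm[2];
rearm[0] = rxq->mbuf_initializer;       /* data_off, refcnt, nb_segs, port */
rearm[1] = (uint64_t)flags16[k];        /* low 16 bits become ol_flags, rest 0 */
memcpy(&rx_pkts[k]->rearm_data, rearm, sizeof(rearm)); /* one 16-byte store */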
/*
* vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP)
@@ -268,6 +267,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0, 0 /* ignore pkt_type field */
);
__m128i dd_check, eop_check;
+ __m128i mbuf_init;
uint8_t vlan_flags;
/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
@@ -313,6 +313,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
0xFF, 0xFF
);
+ mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
+
/* Cache is empty -> need to scan the buffer rings, but first move
* the next 'n' mbufs into the cache
*/
@@ -335,9 +337,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
- __m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
+ /* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
+ __m128i mbp1;
+#if defined(RTE_ARCH_X86_64)
+ __m128i mbp2;
+#endif
- /* B.1 load 1 mbuf point */
+ /* B.1 load 2 (64 bit) or 4 (32 bit) mbuf points */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
@@ -345,11 +351,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
- /* B.2 copy 2 mbuf point into rx_pkts */
+ /* B.2 copy 2 64 bit or 4 32 bit mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
- /* B.1 load 1 mbuf point */
+#if defined(RTE_ARCH_X86_64)
+ /* B.1 load 2 64 bit mbuf points */
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
+#endif
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
@@ -358,8 +366,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+#if defined(RTE_ARCH_X86_64)
/* B.2 copy 2 mbuf point into rx_pkts */
_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
+#endif
if (split_packet) {
rte_mbuf_prefetch_part2(rx_pkts[pos]);
@@ -385,7 +395,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
/* set ol_flags with vlan packet type */
- desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]);
+ desc_to_olflags_v(descs, mbuf_init, vlan_flags, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -425,12 +435,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
/* store the resulting 32-bit value */
*(int *)split_packet = _mm_cvtsi128_si32(eop_bits);
split_packet += RTE_IXGBE_DESCS_PER_LOOP;
-
- /* zero-out next pointers */
- rx_pkts[pos]->next = NULL;
- rx_pkts[pos + 1]->next = NULL;
- rx_pkts[pos + 2]->next = NULL;
- rx_pkts[pos + 3]->next = NULL;
}
/* C.3 calc available number of desc */
@@ -537,8 +541,8 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp,
}
uint16_t
-ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+ixgbe_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue;
volatile union ixgbe_adv_tx_desc *txdp;
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
new file mode 100644
index 00000000..e8fc9a64
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -0,0 +1,910 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+ IXGBE_RAH_AV);
+ }
+ return -EINVAL;
+}
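
A minimal usage sketch for the new API above; the port id, VF index and MAC bytes are illustrative only, and error handling is elided:

#include <rte_ethdev.h>
#include "rte_pmd_ixgbe.h"

static int
assign_vf0_mac(uint8_t port)
{
	/* illustrative locally administered unicast address */
	struct ether_addr addr = {
		.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }
	};

	/* program the MAC of VF 0; returns 0 or a negative errno */
	return rte_pmd_ixgbe_set_vf_mac_addr(port, 0, &addr);
}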
+
+int
+rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+ ctrl = IXGBE_PF_CONTROL_MSG;
+ if (vfinfo[vf].clear_to_send)
+ ctrl |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, &ctrl, 1, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+
+ mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+ mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
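
A short sketch, assuming rte_pmd_ixgbe.h is included and the port is already configured, that turns on both anti-spoof checks for one VF:

static int
enable_vf_anti_spoof(uint8_t port, uint16_t vf)
{
	int ret;

	/* drop TX frames carrying a VLAN tag not assigned to this VF */
	ret = rte_pmd_ixgbe_set_vf_vlan_anti_spoof(port, vf, 1);
	if (ret != 0)
		return ret;

	/* drop TX frames whose source MAC is not the VF's MAC */
	return rte_pmd_ixgbe_set_vf_mac_anti_spoof(port, vf, 1);
}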
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > ETHER_MAX_VLAN_ID)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ if (vlan_id) {
+ ctrl = vlan_id;
+ ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+ } else {
+ ctrl = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ int i;
+ int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ for (i = 0; i <= num_queues; i++) {
+ reg_value = IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ (on & IXGBE_QDE_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ /* only support VFs 0 to 63 */
+ if ((vf >= pci_dev->max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+ if (on)
+ reg_value |= IXGBE_SRRCTL_DROP_EN;
+ else
+ reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs and in an SRIOV configuration
+ * most of those queues are assigned to VFs, so the RXDCTL
+ * registers deal with queues that belong to the VFs.
+ * For example, with SRIOV configured for 31 VFs, the first
+ * 124 queues (0-123) are allocated to the VFs and only the
+ * last 4 queues (124-127) are assigned to the PF.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_16_POOLS;
+ else
+ queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+ ETH_64_POOLS;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
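
Usage sketch for the function above; it simply toggles VLAN stripping on every queue in the VF's pool, as the loop shows (placeholder port/VF values):

static int
strip_vlan_on_vf(uint8_t port, uint16_t vf)
{
	/* 1 = enable stripping on all RX queues belonging to the VF */
	return rte_pmd_ixgbe_set_vf_vlan_stripq(port, vf, 1);
}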
+
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ struct ixgbe_hw *hw;
+ uint32_t vmolr;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting the VF receive mode is only supported"
+ " on 82599 hardware and newer");
+ return -ENOTSUP;
+ }
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+ return 0;
+}
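
A usage sketch for the RX-mode helper; the mask values are the standard ethdev VMDQ accept flags and the port/VF numbers are placeholders:

static int
allow_vf_broadcast(uint8_t port, uint16_t vf)
{
	/* accept untagged and broadcast frames in this VF's pool */
	uint16_t rx_mask = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;

	return rte_pmd_ixgbe_set_vf_rxmode(port, vf, rx_mask, 1);
}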
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (vf >= pci_dev->max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (vf >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (vf - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << vf;
+ }
+
+ reg = IXGBE_READ_REG(hw, addr);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
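
Sketch combining the two helpers above to open both traffic directions for a VF (placeholder port/VF values, rte_pmd_ixgbe.h assumed included):

static int
enable_vf_traffic(uint8_t port, uint16_t vf)
{
	int ret;

	/* allow the VF to receive (sets its bit in PFVFRE) */
	ret = rte_pmd_ixgbe_set_vf_rx(port, vf, 1);
	if (ret != 0)
		return ret;

	/* allow the VF to transmit (sets its bit in PFVFTE) */
	return rte_pmd_ixgbe_set_vf_tx(port, vf, 1);
}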
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+ uint64_t vf_mask, uint8_t vlan_on)
+{
+ struct rte_eth_dev *dev;
+ int ret = 0;
+ uint16_t vf_idx;
+ struct ixgbe_hw *hw;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+ if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
+ ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+ vlan_on, false);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
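
Usage sketch for the VLAN filter helper; vf_mask is a per-VF bitmap, so several VFs can be updated in one call (VLAN id and VF indices are illustrative):

static int
add_vlan_for_vfs(uint8_t port)
{
	/* one bit per VF: here VFs 0 and 2 */
	uint64_t vf_mask = (1ULL << 0) | (1ULL << 2);

	/* enable filtering of VLAN ID 100 for those VFs */
	return rte_pmd_ixgbe_set_vf_vlan_filter(port, 100, vf_mask, 1);
}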
+
+int
+rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
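
A small sketch, assuming tx_rate is expressed in Mbps as elsewhere in ethdev; q_msk selects which of the VF's queues the limit applies to:

static int
cap_vf_tx_rate(uint8_t port, uint16_t vf)
{
	/* limit the first two queues of the VF to 1000 Mbps */
	uint64_t q_msk = 0x3;

	return rte_pmd_ixgbe_set_vf_rate_limit(port, vf, 1000, q_msk);
}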
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * No Tx equivalent of ixgbe_disable_sec_rx_path is
+ * implemented in the base code, and we are not
+ * allowed to modify the base code in DPDK, so just
+ * call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Enable Ethernet CRC (required by MACsec offload) */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+ /* Enable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+ ctrl |= 0x3;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+ /* Enable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+ IXGBE_LSECTXCTRL_AUTH;
+ ctrl |= IXGBE_LSECTXCTRL_AISCI;
+ ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+ ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+ if (rp)
+ ctrl |= IXGBE_LSECRXCTRL_RP;
+ else
+ ctrl &= ~IXGBE_LSECRXCTRL_RP;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * No Tx equivalent of ixgbe_enable_sec_rx_path is
+ * implemented in the base code, and we are not
+ * allowed to modify the base code in DPDK, so just
+ * call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Stop the data paths */
+ if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+ return -ENOTSUP;
+ /**
+ * Workaround:
+ * No Tx equivalent of ixgbe_disable_sec_rx_path is
+ * implemented in the base code, and we are not
+ * allowed to modify the base code in DPDK, so just
+ * call the hand-written one directly for now.
+ * The hardware support has been checked by
+ * ixgbe_disable_sec_rx_path().
+ */
+ ixgbe_disable_sec_tx_path_generic(hw);
+
+ /* Disable the TX and RX crypto engines */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+ ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+ ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+ IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+ /* Disable SA lookup */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+ ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+ ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+ ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+ /* Start the data paths */
+ ixgbe_enable_sec_rx_path(hw);
+ /**
+ * Workaround:
+ * No Tx equivalent of ixgbe_enable_sec_rx_path is
+ * implemented in the base code, and we are not
+ * allowed to modify the base code in DPDK, so just
+ * call the hand-written one directly for now.
+ */
+ ixgbe_enable_sec_tx_path_generic(hw);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+ ctrl = mac[4] | (mac[5] << 8);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+ pi = rte_cpu_to_be_16(pi);
+ ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ /* Set the PN and key */
+ pn = rte_cpu_to_be_32(pn);
+ if (idx == 0) {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+ }
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+ }
+ }
+
+ /* Set AN and select the SA */
+ ctrl = (an << idx * 2) | (idx << 4);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key)
+{
+ struct ixgbe_hw *hw;
+ struct rte_eth_dev *dev;
+ uint32_t ctrl, i;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (idx != 0 && idx != 1)
+ return -EINVAL;
+
+ if (an >= 4)
+ return -EINVAL;
+
+ /* Set the PN */
+ pn = rte_cpu_to_be_32(pn);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+ /* Set the key */
+ for (i = 0; i < 4; i++) {
+ ctrl = (key[i * 4 + 0] << 0) |
+ (key[i * 4 + 1] << 8) |
+ (key[i * 4 + 2] << 16) |
+ (key[i * 4 + 3] << 24);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+ }
+
+ /* Set the AN and validate the SA */
+ ctrl = an | (1 << 2);
+ IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+ return 0;
+}
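
Putting the MACsec calls above together, a bring-up sketch under the assumption of one SA per direction and 16-byte AES-128 keys; all values passed in are caller-supplied placeholders:

static int
macsec_bringup(uint8_t port, uint8_t *local_mac, uint8_t *peer_mac,
	       uint16_t peer_pi, uint8_t *tx_key, uint8_t *rx_key)
{
	int ret;

	/* encryption plus integrity, replay protection on */
	ret = rte_pmd_ixgbe_macsec_enable(port, 1, 1);
	if (ret != 0)
		return ret;

	/* secure channels: local side for TX, remote side for RX */
	ret = rte_pmd_ixgbe_macsec_config_txsc(port, local_mac);
	if (ret != 0)
		return ret;
	ret = rte_pmd_ixgbe_macsec_config_rxsc(port, peer_mac, peer_pi);
	if (ret != 0)
		return ret;

	/* one SA per direction: index 0, association number 0, PN starts at 1 */
	ret = rte_pmd_ixgbe_macsec_select_txsa(port, 0, 0, 1, tx_key);
	if (ret != 0)
		return ret;
	return rte_pmd_ixgbe_macsec_select_rxsa(port, 0, 0, 1, rx_key);
}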
+
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight)
+{
+ struct rte_eth_dev *dev;
+ struct ixgbe_dcb_config *dcb_config;
+ struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_conf *eth_conf;
+ struct ixgbe_bw_conf *bw_conf;
+ uint8_t i;
+ uint8_t nb_tcs;
+ uint16_t sum;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_ixgbe_supported(dev))
+ return -ENOTSUP;
+
+ if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+ PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+ IXGBE_DCB_MAX_TRAFFIC_CLASS);
+ return -EINVAL;
+ }
+
+ dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+ eth_conf = &dev->data->dev_conf;
+
+ if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+ } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+ ETH_32_POOLS)
+ nb_tcs = ETH_4_TCS;
+ else
+ nb_tcs = ETH_8_TCS;
+ } else {
+ nb_tcs = 1;
+ }
+
+ if (nb_tcs != tc_num) {
+ PMD_DRV_LOG(ERR,
+ "Weight should be set for all %d enabled TCs.",
+ nb_tcs);
+ return -EINVAL;
+ }
+
+ sum = 0;
+ for (i = 0; i < nb_tcs; i++)
+ sum += bw_weight[i];
+ if (sum != 100) {
+ PMD_DRV_LOG(ERR,
+ "The summary of the TC weight should be 100.");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+ }
+ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ tc = &dcb_config->tc_config[i];
+ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+ }
+
+ bw_conf->tc_num = nb_tcs;
+
+ return 0;
+}
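
Usage sketch for the bandwidth-weight API, assuming the port is already configured for four DCB TCs; the weights must sum to 100 as the check above enforces:

static int
set_dcb_weights(uint8_t port)
{
	/* four TCs; the weights must add up to 100 */
	uint8_t bw_weight[4] = { 10, 20, 30, 40 };

	return rte_pmd_ixgbe_set_tc_bw_alloc(port, 4, bw_weight);
}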
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.h b/drivers/net/ixgbe/rte_pmd_ixgbe.h
index c2fb8261..1f2b1bd7 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe.h
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.h
@@ -42,6 +42,20 @@
#include <rte_ethdev.h>
/**
+ * Notify VF when PF link status changes.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if *vf* invalid.
+ */
+int rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf);
+
+/**
* Set the VF MAC address.
*
* @param port
@@ -183,6 +197,237 @@ int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on);
/**
+ * Enable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param en
+ * 1 - Enable encryption (encrypt and add integrity signature).
+ * 0 - Disable encryption (only add integrity signature).
+ * @param rp
+ * 1 - Enable replay protection.
+ * 0 - Disable replay protection.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp);
+
+/**
+ * Disable MACsec offload.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_disable(uint8_t port);
+
+/**
+ * Configure Tx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac);
+
+/**
+ * Configure Rx SC (Secure Connection).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param mac
+ * The MAC address on the remote side.
+ * @param pi
+ * The PI (port identifier) on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ */
+int rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi);
+
+/**
+ * Enable Tx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1).
+ * @param an
+ * The association number on the local side.
+ * @param pn
+ * The packet number on the local side.
+ * @param key
+ * The key on the local side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+ * Enable Rx SA (Secure Association).
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param idx
+ * The SA to be enabled (0 or 1)
+ * @param an
+ * The association number on the remote side.
+ * @param pn
+ * The packet number on the remote side.
+ * @param key
+ * The key on the remote side.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+ uint32_t pn, uint8_t *key);
+
+/**
+* Set RX L2 Filtering mode of a VF of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param rx_mask
+* The RX mode mask: one or more of accepting untagged packets, accepting
+* packets that match the PFUTA table, broadcast promiscuous and multicast
+* promiscuous. ETH_VMDQ_ACCEPT_UNTAG, ETH_VMDQ_ACCEPT_HASH_UC,
+* ETH_VMDQ_ACCEPT_BROADCAST and ETH_VMDQ_ACCEPT_MULTICAST can be used
+* in rx_mask.
+* @param on
+* 1 - Enable a VF RX mode.
+* 0 - Disable a VF RX mode.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on);
+
+/**
+* Enable or disable a VF traffic receive of an Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic receive.
+* 0 - Disable a VF traffic receive.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable or disable a VF traffic transmit of the Ethernet device.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vf
+* VF id.
+* @param on
+* 1 - Enable a VF traffic transmit.
+* 0 - Disable a VF traffic transmit.
+* @return
+* - (0) if successful.
+* - (-ENODEV) if *port_id* invalid.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on);
+
+/**
+* Enable/Disable hardware VF VLAN filtering by an Ethernet device of
+* received VLAN packets tagged with a given VLAN Tag Identifier.
+*
+* @param port
+* The port identifier of the Ethernet device.
+* @param vlan
+* The VLAN Tag Identifier whose filtering must be enabled or disabled.
+* @param vf_mask
+* Bitmap listing which VFs participate in the VLAN filtering.
+* @param vlan_on
+* 1 - Enable VFs VLAN filtering.
+* 0 - Disable VFs VLAN filtering.
+* @return
+* - (0) if successful.
+* - (-ENOTSUP) if hardware doesn't support.
+* - (-ENODEV) if *port_id* invalid.
+* - (-EINVAL) if bad parameter.
+*/
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan, uint64_t vf_mask, uint8_t vlan_on);
+
+/**
+ * Set the rate limitation for a vf on an Ethernet device.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param vf
+ * VF id.
+ * @param tx_rate
+ * The tx rate allocated from the total link speed for this VF id.
+ * @param q_msk
+ * Bitmap of the VF queues to which the rate limit applies.
+ * @return
+ * - (0) if successful.
+ * - (-ENOTSUP) if hardware doesn't support this feature.
+ * - (-ENODEV) if *port_id* invalid.
+ * - (-EINVAL) if bad parameter.
+ */
+int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf, uint16_t tx_rate, uint64_t q_msk);
+
+/**
+ * Set all the TCs' bandwidth weight.
+ *
+ * Each bw_weight entry is the percentage of bandwidth occupied by its TC.
+ * It can be taken as a relative minimum bandwidth guarantee.
+ *
+ * @param port
+ * The port identifier of the Ethernet device.
+ * @param tc_num
+ * Number of TCs.
+ * @param bw_weight
+ * An array of relative bandwidth weights for all the TCs.
+ * The sum of the weights should be 100.
+ * @return
+ * - (0) if successful.
+ * - (-ENODEV) if *port* invalid.
+ * - (-EINVAL) if bad parameter.
+ * - (-ENOTSUP) not supported by firmware.
+ */
+int rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+ uint8_t tc_num,
+ uint8_t *bw_weight);
+
+/**
* Response sent back to ixgbe driver from user app after callback
*/
enum rte_pmd_ixgbe_mb_event_rsp {
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
index 92434f3f..45a57e33 100644
--- a/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe_version.map
@@ -15,3 +15,26 @@ DPDK_16.11 {
rte_pmd_ixgbe_set_vf_vlan_insert;
rte_pmd_ixgbe_set_vf_vlan_stripq;
} DPDK_2.0;
+
+DPDK_17.02 {
+ global:
+
+ rte_pmd_ixgbe_macsec_config_rxsc;
+ rte_pmd_ixgbe_macsec_config_txsc;
+ rte_pmd_ixgbe_macsec_disable;
+ rte_pmd_ixgbe_macsec_enable;
+ rte_pmd_ixgbe_macsec_select_rxsa;
+ rte_pmd_ixgbe_macsec_select_txsa;
+ rte_pmd_ixgbe_set_vf_rate_limit;
+ rte_pmd_ixgbe_set_vf_rx;
+ rte_pmd_ixgbe_set_vf_rxmode;
+ rte_pmd_ixgbe_set_vf_tx;
+ rte_pmd_ixgbe_set_vf_vlan_filter;
+} DPDK_16.11;
+
+DPDK_17.05 {
+ global:
+
+ rte_pmd_ixgbe_ping_vf;
+ rte_pmd_ixgbe_set_tc_bw_alloc;
+} DPDK_17.02;
diff --git a/drivers/net/kni/Makefile b/drivers/net/kni/Makefile
new file mode 100644
index 00000000..46a1ad08
--- /dev/null
+++ b/drivers/net/kni/Makefile
@@ -0,0 +1,56 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_kni.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lpthread
+
+EXPORT_MAP := rte_pmd_kni_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_KNI) += rte_eth_kni.c
+
+#
+# Export include files
+#
+SYMLINK-y-include +=
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/kni/rte_eth_kni.c b/drivers/net/kni/rte_eth_kni.c
new file mode 100644
index 00000000..f688d919
--- /dev/null
+++ b/drivers/net/kni/rte_eth_kni.c
@@ -0,0 +1,510 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_kni.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_vdev.h>
+
+/* Only single queue supported */
+#define KNI_MAX_QUEUE_PER_PORT 1
+
+#define MAX_PACKET_SZ 2048
+#define MAX_KNI_PORTS 8
+
+#define ETH_KNI_NO_REQUEST_THREAD_ARG "no_request_thread"
+static const char * const valid_arguments[] = {
+ ETH_KNI_NO_REQUEST_THREAD_ARG,
+ NULL
+};
+
+struct eth_kni_args {
+ int no_request_thread;
+};
+
+struct pmd_queue_stats {
+ uint64_t pkts;
+ uint64_t bytes;
+ uint64_t err_pkts;
+};
+
+struct pmd_queue {
+ struct pmd_internals *internals;
+ struct rte_mempool *mb_pool;
+
+ struct pmd_queue_stats rx;
+ struct pmd_queue_stats tx;
+};
+
+struct pmd_internals {
+ struct rte_kni *kni;
+ int is_kni_started;
+
+ pthread_t thread;
+ int stop_thread;
+ int no_request_thread;
+
+ struct ether_addr eth_addr;
+
+ struct pmd_queue rx_queues[KNI_MAX_QUEUE_PER_PORT];
+ struct pmd_queue tx_queues[KNI_MAX_QUEUE_PER_PORT];
+};
+
+static const struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_SPEED_AUTONEG,
+};
+static int is_kni_initialized;
+
+static uint16_t
+eth_kni_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct pmd_queue *kni_q = q;
+ struct rte_kni *kni = kni_q->internals->kni;
+ uint16_t nb_pkts;
+
+ nb_pkts = rte_kni_rx_burst(kni, bufs, nb_bufs);
+
+ kni_q->rx.pkts += nb_pkts;
+ kni_q->rx.err_pkts += nb_bufs - nb_pkts;
+
+ return nb_pkts;
+}
+
+static uint16_t
+eth_kni_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
+{
+ struct pmd_queue *kni_q = q;
+ struct rte_kni *kni = kni_q->internals->kni;
+ uint16_t nb_pkts;
+
+ nb_pkts = rte_kni_tx_burst(kni, bufs, nb_bufs);
+
+ kni_q->tx.pkts += nb_pkts;
+ kni_q->tx.err_pkts += nb_bufs - nb_pkts;
+
+ return nb_pkts;
+}
+
+static void *
+kni_handle_request(void *param)
+{
+ struct pmd_internals *internals = param;
+#define MS 1000
+
+ while (!internals->stop_thread) {
+ rte_kni_handle_request(internals->kni);
+ usleep(500 * MS);
+ }
+
+ return param;
+}
+
+static int
+eth_kni_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ uint16_t port_id = dev->data->port_id;
+ struct rte_mempool *mb_pool;
+ struct rte_kni_conf conf;
+ const char *name = dev->data->name + 4; /* remove net_ */
+
+ snprintf(conf.name, RTE_KNI_NAMESIZE, "%s", name);
+ conf.force_bind = 0;
+ conf.group_id = port_id;
+ conf.mbuf_size = MAX_PACKET_SZ;
+ mb_pool = internals->rx_queues[0].mb_pool;
+
+ internals->kni = rte_kni_alloc(mb_pool, &conf, NULL);
+ if (internals->kni == NULL) {
+ RTE_LOG(ERR, PMD,
+ "Fail to create kni interface for port: %d\n",
+ port_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+eth_kni_dev_start(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (internals->is_kni_started == 0) {
+ ret = eth_kni_start(dev);
+ if (ret)
+ return -1;
+ internals->is_kni_started = 1;
+ }
+
+ if (internals->no_request_thread == 0) {
+ ret = pthread_create(&internals->thread, NULL,
+ kni_handle_request, internals);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+ "Fail to create kni request thread\n");
+ return -1;
+ }
+ }
+
+ dev->data->dev_link.link_status = 1;
+
+ return 0;
+}
+
+static void
+eth_kni_dev_stop(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (internals->no_request_thread == 0) {
+ internals->stop_thread = 1;
+
+ ret = pthread_cancel(internals->thread);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
+
+ ret = pthread_join(internals->thread, NULL);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Can't join the thread\n");
+
+ internals->stop_thread = 0;
+ }
+
+ dev->data->dev_link.link_status = 0;
+}
+
+static int
+eth_kni_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_kni_dev_info(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_dev_info *dev_info)
+{
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = UINT32_MAX;
+ dev_info->max_rx_queues = KNI_MAX_QUEUE_PER_PORT;
+ dev_info->max_tx_queues = KNI_MAX_QUEUE_PER_PORT;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+}
+
+static int
+eth_kni_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mb_pool)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_queue *q;
+
+ q = &internals->rx_queues[rx_queue_id];
+ q->internals = internals;
+ q->mb_pool = mb_pool;
+
+ dev->data->rx_queues[rx_queue_id] = q;
+
+ return 0;
+}
+
+static int
+eth_kni_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_queue *q;
+
+ q = &internals->tx_queues[tx_queue_id];
+ q->internals = internals;
+
+ dev->data->tx_queues[tx_queue_id] = q;
+
+ return 0;
+}
+
+static void
+eth_kni_queue_release(void *q __rte_unused)
+{
+}
+
+static int
+eth_kni_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
+{
+ return 0;
+}
+
+static void
+eth_kni_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ unsigned long rx_packets_total = 0, rx_bytes_total = 0;
+ unsigned long tx_packets_total = 0, tx_bytes_total = 0;
+ struct rte_eth_dev_data *data = dev->data;
+ unsigned long tx_packets_err_total = 0;
+ unsigned int i, num_stats;
+ struct pmd_queue *q;
+
+ num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ data->nb_rx_queues);
+ for (i = 0; i < num_stats; i++) {
+ q = data->rx_queues[i];
+ stats->q_ipackets[i] = q->rx.pkts;
+ stats->q_ibytes[i] = q->rx.bytes;
+ rx_packets_total += stats->q_ipackets[i];
+ rx_bytes_total += stats->q_ibytes[i];
+ }
+
+ num_stats = RTE_MIN((unsigned int)RTE_ETHDEV_QUEUE_STAT_CNTRS,
+ data->nb_tx_queues);
+ for (i = 0; i < num_stats; i++) {
+ q = data->tx_queues[i];
+ stats->q_opackets[i] = q->tx.pkts;
+ stats->q_obytes[i] = q->tx.bytes;
+ stats->q_errors[i] = q->tx.err_pkts;
+ tx_packets_total += stats->q_opackets[i];
+ tx_bytes_total += stats->q_obytes[i];
+ tx_packets_err_total += stats->q_errors[i];
+ }
+
+ stats->ipackets = rx_packets_total;
+ stats->ibytes = rx_bytes_total;
+ stats->opackets = tx_packets_total;
+ stats->obytes = tx_bytes_total;
+ stats->oerrors = tx_packets_err_total;
+}
+
+static void
+eth_kni_stats_reset(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct pmd_queue *q;
+ unsigned int i;
+
+ for (i = 0; i < data->nb_rx_queues; i++) {
+ q = data->rx_queues[i];
+ q->rx.pkts = 0;
+ q->rx.bytes = 0;
+ }
+ for (i = 0; i < data->nb_tx_queues; i++) {
+ q = data->tx_queues[i];
+ q->tx.pkts = 0;
+ q->tx.bytes = 0;
+ q->tx.err_pkts = 0;
+ }
+}
+
+static const struct eth_dev_ops eth_kni_ops = {
+ .dev_start = eth_kni_dev_start,
+ .dev_stop = eth_kni_dev_stop,
+ .dev_configure = eth_kni_dev_configure,
+ .dev_infos_get = eth_kni_dev_info,
+ .rx_queue_setup = eth_kni_rx_queue_setup,
+ .tx_queue_setup = eth_kni_tx_queue_setup,
+ .rx_queue_release = eth_kni_queue_release,
+ .tx_queue_release = eth_kni_queue_release,
+ .link_update = eth_kni_link_update,
+ .stats_get = eth_kni_stats_get,
+ .stats_reset = eth_kni_stats_reset,
+};
+
+static struct rte_vdev_driver eth_kni_drv;
+
+static struct rte_eth_dev *
+eth_kni_create(struct rte_vdev_device *vdev,
+ struct eth_kni_args *args,
+ unsigned int numa_node)
+{
+ struct pmd_internals *internals;
+ struct rte_eth_dev_data *data;
+ struct rte_eth_dev *eth_dev;
+ const char *name;
+
+ RTE_LOG(INFO, PMD, "Creating kni ethdev on numa socket %u\n",
+ numa_node);
+
+ name = rte_vdev_device_name(vdev);
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
+ if (data == NULL)
+ return NULL;
+
+ /* reserve an ethdev entry */
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*internals));
+ if (eth_dev == NULL) {
+ rte_free(data);
+ return NULL;
+ }
+
+ internals = eth_dev->data->dev_private;
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
+ data->nb_rx_queues = 1;
+ data->nb_tx_queues = 1;
+ data->dev_link = pmd_link;
+ data->mac_addrs = &internals->eth_addr;
+
+ eth_random_addr(internals->eth_addr.addr_bytes);
+
+ eth_dev->data = data;
+ eth_dev->dev_ops = &eth_kni_ops;
+
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+
+ internals->no_request_thread = args->no_request_thread;
+
+ return eth_dev;
+}
+
+static int
+kni_init(void)
+{
+ if (is_kni_initialized == 0)
+ rte_kni_init(MAX_KNI_PORTS);
+
+ is_kni_initialized++;
+
+ return 0;
+}
+
+static int
+eth_kni_kvargs_process(struct eth_kni_args *args, const char *params)
+{
+ struct rte_kvargs *kvlist;
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist == NULL)
+ return -1;
+
+ memset(args, 0, sizeof(struct eth_kni_args));
+
+ if (rte_kvargs_count(kvlist, ETH_KNI_NO_REQUEST_THREAD_ARG) == 1)
+ args->no_request_thread = 1;
+
+ rte_kvargs_free(kvlist);
+
+ return 0;
+}
+
+static int
+eth_kni_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct eth_kni_args args;
+ const char *name;
+ const char *params;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ params = rte_vdev_device_args(vdev);
+ RTE_LOG(INFO, PMD, "Initializing eth_kni for %s\n", name);
+
+ ret = eth_kni_kvargs_process(&args, params);
+ if (ret < 0)
+ return ret;
+
+ ret = kni_init();
+ if (ret < 0)
+ return ret;
+
+ eth_dev = eth_kni_create(vdev, &args, rte_socket_id());
+ if (eth_dev == NULL)
+ goto kni_uninit;
+
+ eth_dev->rx_pkt_burst = eth_kni_rx;
+ eth_dev->tx_pkt_burst = eth_kni_tx;
+
+ return 0;
+
+kni_uninit:
+ is_kni_initialized--;
+ if (is_kni_initialized == 0)
+ rte_kni_close();
+ return -1;
+}
+
+static int
+eth_kni_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_eth_dev *eth_dev;
+ struct pmd_internals *internals;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ RTE_LOG(INFO, PMD, "Un-Initializing eth_kni for %s\n", name);
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -1;
+
+ eth_kni_dev_stop(eth_dev);
+
+ internals = eth_dev->data->dev_private;
+ rte_kni_release(internals->kni);
+
+ rte_free(internals);
+ rte_free(eth_dev->data);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ is_kni_initialized--;
+ if (is_kni_initialized == 0)
+ rte_kni_close();
+
+ return 0;
+}
+
+static struct rte_vdev_driver eth_kni_drv = {
+ .probe = eth_kni_probe,
+ .remove = eth_kni_remove,
+};
+
+RTE_PMD_REGISTER_VDEV(net_kni, eth_kni_drv);
+RTE_PMD_REGISTER_PARAM_STRING(net_kni, ETH_KNI_NO_REQUEST_THREAD_ARG "=<int>");
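
A sketch of instantiating this PMD from an application; the devargs string mirrors the vdev name and parameter registered above:

#include <rte_eal.h>
#include <rte_debug.h>

int
main(void)
{
	/* request one KNI vdev; the devargs string matches the
	 * parameter registered above (no_request_thread=<int>) */
	char *argv[] = { "app", "--vdev=net_kni0,no_request_thread=1" };
	int ret = rte_eal_init(2, argv);

	if (ret < 0)
		rte_panic("EAL init failed\n");
	return 0;
}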
diff --git a/drivers/net/kni/rte_pmd_kni_version.map b/drivers/net/kni/rte_pmd_kni_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/net/kni/rte_pmd_kni_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/liquidio/Makefile b/drivers/net/liquidio/Makefile
new file mode 100644
index 00000000..32c06f5b
--- /dev/null
+++ b/drivers/net/liquidio/Makefile
@@ -0,0 +1,58 @@
+#
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium, Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_lio.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)/base -I$(SRCDIR)
+
+EXPORT_MAP := rte_pmd_lio_version.map
+
+LIBABIVER := 1
+
+VPATH += $(RTE_SDK)/drivers/net/liquidio/base
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_23xx_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_LIO_PMD) += lio_mbox.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/liquidio/base/lio_23xx_reg.h b/drivers/net/liquidio/base/lio_23xx_reg.h
new file mode 100644
index 00000000..794bc2ca
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_23xx_reg.h
@@ -0,0 +1,194 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_23XX_REG_H_
+#define _LIO_23XX_REG_H_
+
+/* ###################### REQUEST QUEUE ######################### */
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT(0..63)_INSTR_BADDR */
+#define CN23XX_SLI_PKT_INSTR_BADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT(0..63)_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT(0..63)_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START 0x10030
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE(0..63)_CNTS */
+#define CN23XX_SLI_PKT_IN_DONE_CNTS_START64 0x10040
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_PKT_INPUT_CONTROL_START64 0x10000
+
+/* ------- Request Queue Macros --------- */
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_PKT_INPUT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_PKT_INSTR_BADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_PKT_INSTR_FIFO_RSIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_PKT_IN_DONE_CNTS_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B (1 << 24)
+#define CN23XX_PKT_INPUT_CTL_RST (1 << 23)
+#define CN23XX_PKT_INPUT_CTL_QUIET (1 << 28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB (1 << 22)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP (1 << 6)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR (1 << 4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+
+/* These bits[47:44] select the Physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS 45
+/* These bits[43:32] select the virtual function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS 32
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/* ############################ OUTPUT QUEUE ######################### */
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_PKT_OUTPUT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size
+ * SLI_PKT(0..63)_OUT_SIZE
+ */
+#define CN23XX_SLI_PKT_OUT_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT(0..63)_SLIST_BADDR */
+#define CN23XX_SLI_SLIST_BADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits
+ * SLI_PKT(0..63)_SLIST_BAOFF_DBELL
+ */
+#define CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT(0..63)_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT(0..63)_CNTS */
+#define CN23XX_SLI_PKT_CNTS_START 0x100B0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* ------- Output Queue Macros --------- */
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_PKT_OUTPUT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_SLIST_BADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_PKT_SLIST_FIFO_RSIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_PKT_OUT_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_PKT_CNTS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_PKT_SLIST_BAOFF_DBELL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+/* ------------------ Masks ---------------- */
+#define CN23XX_PKT_OUTPUT_CTL_IPTR (1 << 11)
+#define CN23XX_PKT_OUTPUT_CTL_ES (1 << 9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR (1 << 8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR (1 << 7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR (1 << 6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE (1 << 5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P (1 << 3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P (1 << 2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P (1 << 1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB (1 << 0)
+
+/* Rings per Virtual Function [RO] */
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK 0x3F
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS 48
+
+/* These bits[47:44][RO] give the Physical function
+ * number info within the MAC
+ */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK 0x7
+
+/* These bits[43:32][RO] give the virtual function
+ * number info within the PF
+ */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK 0x1FFF
+
+/* ######################### Mailbox Reg Macros ######################## */
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#endif /* _LIO_23XX_REG_H_ */
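
All of the per-queue CSR macros in this header follow the same pattern: a fixed per-register base plus a 0x20000 per-queue stride. A minimal standalone sketch (not part of the patch itself) of how the input-queue doorbell macro expands:

#include <stdint.h>
#include <stdio.h>

#define CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START	0x10020
#define CN23XX_IQ_OFFSET			0x20000
#define CN23XX_SLI_IQ_DOORBELL(iq) \
	(CN23XX_SLI_PKT_INSTR_BADDR_DBELL_START + ((iq) * CN23XX_IQ_OFFSET))

int main(void)
{
	uint32_t q;

	/* Queue 0..3 doorbells: 0x10020, 0x30020, 0x50020, 0x70020 */
	for (q = 0; q < 4; q++)
		printf("IQ%u doorbell at BAR0 + 0x%x\n", q,
		       (unsigned int)CN23XX_SLI_IQ_DOORBELL(q));
	return 0;
}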
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.c b/drivers/net/liquidio/base/lio_23xx_vf.c
new file mode 100644
index 00000000..e30c20dc
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_23xx_vf.c
@@ -0,0 +1,588 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "lio_logs.h"
+#include "lio_23xx_vf.h"
+#include "lio_23xx_reg.h"
+#include "lio_mbox.h"
+
+static int
+cn23xx_vf_reset_io_queues(struct lio_device *lio_dev, uint32_t num_queues)
+{
+ uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
+ uint64_t d64, q_no;
+ int ret_val = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ volatile uint64_t reg_val;
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop = loop - 1;
+ }
+
+ if (loop == 0) {
+ lio_dev_err(lio_dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = lio_read_csr64(
+ lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ lio_dev_err(lio_dev,
+ "clearing the reset failed for qno: %lu\n",
+ (unsigned long)q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int
+cn23xx_vf_setup_global_input_regs(struct lio_device *lio_dev)
+{
+ uint64_t q_no;
+ uint64_t d64;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cn23xx_vf_reset_io_queues(lio_dev,
+ lio_dev->sriov_info.rings_per_vf))
+ return -1;
+
+ for (q_no = 0; q_no < (lio_dev->sriov_info.rings_per_vf); q_no++) {
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_DOORBELL(q_no),
+ 0xFFFFFFFF);
+
+ d64 = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no));
+
+ d64 &= 0xEFFFFFFFFFFFFFFFL;
+
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_INSTR_COUNT64(q_no),
+ d64);
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
+ * the Input Queues
+ */
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ CN23XX_PKT_INPUT_CTL_MASK);
+ }
+
+ return 0;
+}
+
+static void
+cn23xx_vf_setup_global_output_regs(struct lio_device *lio_dev)
+{
+ uint32_t reg_val;
+ uint32_t q_no;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+
+ reg_val =
+ lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKTS_SENT(q_no));
+
+ reg_val &= 0xEFFFFFFFFFFFFFFFL;
+
+ reg_val =
+ lio_read_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* set IPTR & DPTR */
+ reg_val |=
+ (CN23XX_PKT_OUTPUT_CTL_IPTR | CN23XX_PKT_OUTPUT_CTL_DPTR);
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Scatter List
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+}
+
+static int
+cn23xx_vf_setup_device_regs(struct lio_device *lio_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (cn23xx_vf_setup_global_input_regs(lio_dev))
+ return -1;
+
+ cn23xx_vf_setup_global_output_regs(lio_dev);
+
+ return 0;
+}
+
+static void
+cn23xx_vf_setup_iq_regs(struct lio_device *lio_dev, uint32_t iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ uint64_t pkt_in_done = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Write the start of the input queue's ring and its size */
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ lio_dev_dbg(lio_dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = rte_read64(iq->inst_cnt_reg);
+
+ /* Clear the count by writing back what we read, but don't
+ * enable data traffic here
+ */
+ rte_write64(pkt_in_done, iq->inst_cnt_reg);
+}
+
+static void
+cn23xx_vf_setup_oq_regs(struct lio_device *lio_dev, uint32_t oq_no)
+{
+ struct lio_droq *droq = lio_dev->droq[oq_no];
+
+ PMD_INIT_FUNC_TRACE();
+
+ lio_write_csr64(lio_dev, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ lio_write_csr(lio_dev, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ (droq->buffer_size | (OCTEON_RH_SIZE << 16)));
+
+ /* Get the mapped address of the pkts_sent and pkts_credit regs */
+ droq->pkts_sent_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void
+cn23xx_vf_free_mbox(struct lio_device *lio_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ rte_free(lio_dev->mbox[0]);
+ lio_dev->mbox[0] = NULL;
+
+ rte_free(lio_dev->mbox);
+ lio_dev->mbox = NULL;
+}
+
+static int
+cn23xx_vf_setup_mbox(struct lio_device *lio_dev)
+{
+ struct lio_mbox *mbox;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (lio_dev->mbox == NULL) {
+ lio_dev->mbox = rte_zmalloc(NULL, sizeof(void *), 0);
+ if (lio_dev->mbox == NULL)
+ return -ENOMEM;
+ }
+
+ mbox = rte_zmalloc(NULL, sizeof(struct lio_mbox), 0);
+ if (mbox == NULL) {
+ rte_free(lio_dev->mbox);
+ lio_dev->mbox = NULL;
+ return -ENOMEM;
+ }
+
+ rte_spinlock_init(&mbox->lock);
+
+ mbox->lio_dev = lio_dev;
+
+ mbox->q_no = 0;
+
+ mbox->state = LIO_MBOX_STATE_IDLE;
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_VF_SLI_PKT_MBOX_INT(0);
+ /* VF reads from SIG0 reg */
+ mbox->mbox_read_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
+ /* VF writes into SIG1 reg */
+ mbox->mbox_write_reg = (uint8_t *)lio_dev->hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
+
+ lio_dev->mbox[0] = mbox;
+
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+}
+
+static int
+cn23xx_vf_enable_io_queues(struct lio_device *lio_dev)
+{
+ uint32_t q_no;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->num_iqs; q_no++) {
+ uint64_t reg_val;
+
+ /* set the corresponding IQ IS_64B bit */
+ if (lio_dev->io_qmask.iq64B & (1ULL << q_no)) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ lio_write_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (lio_dev->io_qmask.iq & (1ULL << q_no)) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ lio_write_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+ }
+ for (q_no = 0; q_no < lio_dev->num_oqs; q_no++) {
+ uint32_t reg_val;
+
+ /* set the corresponding OQ ENB bit */
+ if (lio_dev->io_qmask.oq & (1ULL << q_no)) {
+ reg_val = lio_read_csr(
+ lio_dev,
+ CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ lio_write_csr(lio_dev,
+ CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+
+ return 0;
+}
+
+static void
+cn23xx_vf_disable_io_queues(struct lio_device *lio_dev)
+{
+ uint32_t num_queues;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* per HRM, rings can only be disabled via reset operation,
+ * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
+ */
+ num_queues = lio_dev->num_iqs;
+ if (num_queues < lio_dev->num_oqs)
+ num_queues = lio_dev->num_oqs;
+
+ cn23xx_vf_reset_io_queues(lio_dev, num_queues);
+}
+
+void
+cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ memset(&mbox_cmd, 0, sizeof(struct lio_mbox_cmd));
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = LIO_VF_FLR_REQUEST;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = 0;
+
+ lio_mbox_write(lio_dev, &mbox_cmd);
+}
+
+static void
+cn23xx_pfvf_hs_callback(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *cmd, void *arg)
+{
+ uint32_t major = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ rte_memcpy((uint8_t *)&lio_dev->pfvf_hsword, cmd->msg.s.params, 6);
+ if (cmd->recv_len > 1) {
+ struct lio_version *lio_ver = (struct lio_version *)cmd->data;
+
+ major = lio_ver->major;
+ major = major << 16;
+ }
+
+ rte_atomic64_set((rte_atomic64_t *)arg, major | 1);
+}
+
+int
+cn23xx_pfvf_handshake(struct lio_device *lio_dev)
+{
+ struct lio_mbox_cmd mbox_cmd;
+ struct lio_version *lio_ver = (struct lio_version *)&mbox_cmd.data[0];
+ uint32_t q_no, count = 0;
+ rte_atomic64_t status;
+ uint32_t pfmajor;
+ uint32_t vfmajor;
+ uint32_t ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Sending VF_ACTIVE indication to the PF driver */
+ lio_dev_dbg(lio_dev, "requesting info from PF\n");
+
+ mbox_cmd.msg.mbox_msg64 = 0;
+ mbox_cmd.msg.s.type = LIO_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = LIO_VF_ACTIVE;
+ mbox_cmd.msg.s.len = 2;
+ mbox_cmd.data[0] = 0;
+ lio_ver->major = LIO_BASE_MAJOR_VERSION;
+ lio_ver->minor = LIO_BASE_MINOR_VERSION;
+ lio_ver->micro = LIO_BASE_MICRO_VERSION;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (lio_mbox_callback)cn23xx_pfvf_hs_callback;
+ mbox_cmd.fn_arg = (void *)&status;
+
+ if (lio_mbox_write(lio_dev, &mbox_cmd)) {
+ lio_dev_err(lio_dev, "Write to mailbox failed\n");
+ return -1;
+ }
+
+ rte_atomic64_set(&status, 0);
+
+ do {
+ rte_delay_ms(1);
+ } while ((rte_atomic64_read(&status) == 0) && (count++ < 10000));
+
+ ret = rte_atomic64_read(&status);
+ if (ret == 0) {
+ lio_dev_err(lio_dev, "cn23xx_pfvf_handshake timeout\n");
+ return -1;
+ }
+
+ for (q_no = 0; q_no < lio_dev->num_iqs; q_no++)
+ lio_dev->instr_queue[q_no]->txpciq.s.pkind =
+ lio_dev->pfvf_hsword.pkind;
+
+ vfmajor = LIO_BASE_MAJOR_VERSION;
+ pfmajor = ret >> 16;
+ if (pfmajor != vfmajor) {
+ lio_dev_err(lio_dev,
+ "VF LiquidIO driver (major version %d) is not compatible with LiquidIO PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ ret = -EPERM;
+ } else {
+ lio_dev_dbg(lio_dev,
+ "VF LiquidIO driver (major version %d), LiquidIO PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ ret = 0;
+ }
+
+ lio_dev_dbg(lio_dev, "got data from PF pkind is %d\n",
+ lio_dev->pfvf_hsword.pkind);
+
+ return ret;
+}
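
The handshake above packs the PF's reply into a single status word: the callback stores the PF major version in the upper 16 bits and sets bit 0, so a value of zero always means "no reply yet". A standalone sketch (not part of the patch itself) of the encode/decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pf_major = 1;			/* major version sent by the PF */
	uint32_t status = (pf_major << 16) | 1;	/* what the hs callback stores */

	if (status == 0)
		printf("still waiting for the PF reply\n");
	else
		printf("PF driver major version: %u\n", status >> 16);
	return 0;
}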
+
+void
+cn23xx_vf_handle_mbox(struct lio_device *lio_dev)
+{
+ uint64_t mbox_int_val;
+
+ /* read and clear by writing 1 */
+ mbox_int_val = rte_read64(lio_dev->mbox[0]->mbox_int_reg);
+ rte_write64(mbox_int_val, lio_dev->mbox[0]->mbox_int_reg);
+ if (lio_mbox_read(lio_dev->mbox[0]))
+ lio_mbox_process_message(lio_dev->mbox[0]);
+}
+
+int
+cn23xx_vf_setup_device(struct lio_device *lio_dev)
+{
+ uint64_t reg_val;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* INPUT_CONTROL[RPVF] gives the VF IOq count */
+ reg_val = lio_read_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(0));
+
+ lio_dev->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ lio_dev->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
+
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+
+ lio_dev->sriov_info.rings_per_vf =
+ reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+
+ lio_dev->default_config = lio_get_conf(lio_dev);
+ if (lio_dev->default_config == NULL)
+ return -1;
+
+ lio_dev->fn_list.setup_iq_regs = cn23xx_vf_setup_iq_regs;
+ lio_dev->fn_list.setup_oq_regs = cn23xx_vf_setup_oq_regs;
+ lio_dev->fn_list.setup_mbox = cn23xx_vf_setup_mbox;
+ lio_dev->fn_list.free_mbox = cn23xx_vf_free_mbox;
+
+ lio_dev->fn_list.setup_device_regs = cn23xx_vf_setup_device_regs;
+
+ lio_dev->fn_list.enable_io_queues = cn23xx_vf_enable_io_queues;
+ lio_dev->fn_list.disable_io_queues = cn23xx_vf_disable_io_queues;
+
+ return 0;
+}
+
+int
+cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev)
+{
+ uint32_t loop = CN23XX_VF_BUSY_READING_REG_LOOP_COUNT;
+ uint64_t q_no;
+
+ /* Disable the input and output queues for this Octeon.
+ * The IOQs will already be in reset.
+ * If the RST bit is set, wait for the Quiet bit to be set;
+ * once the Quiet bit is set, clear the RST bit.
+ */
+ PMD_INIT_FUNC_TRACE();
+
+ for (q_no = 0; q_no < lio_dev->sriov_info.rings_per_vf; q_no++) {
+ volatile uint64_t reg_val;
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && !(reg_val &
+ CN23XX_PKT_INPUT_CTL_QUIET) && loop) {
+ reg_val = lio_read_csr64(
+ lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop = loop - 1;
+ }
+
+ if (loop == 0) {
+ lio_dev_err(lio_dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ lio_write_csr64(lio_dev, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = lio_read_csr64(lio_dev,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ lio_dev_err(lio_dev, "unable to reset qno %lu\n",
+ (unsigned long)q_no);
+ return -1;
+ }
+ }
+
+ return 0;
+}
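
cn23xx_vf_setup_device() above derives pf_num, vf_num and rings_per_vf from SLI_PKT(0)_INPUT_CONTROL by shifting and masking. A standalone sketch (not part of the patch itself), reusing the field definitions from lio_23xx_reg.h with a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define CN23XX_PKT_INPUT_CTL_RPVF_POS    48
#define CN23XX_PKT_INPUT_CTL_RPVF_MASK   0x3F
#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS  45
#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK 0x7
#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS  32
#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK 0x1FFF

int main(void)
{
	/* Illustrative register value: RPVF = 8, PF = 1, VF = 3. */
	uint64_t reg_val = (8ULL << CN23XX_PKT_INPUT_CTL_RPVF_POS) |
			   (1ULL << CN23XX_PKT_INPUT_CTL_PF_NUM_POS) |
			   (3ULL << CN23XX_PKT_INPUT_CTL_VF_NUM_POS);

	printf("pf_num       = %u\n",
	       (unsigned int)((reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
			      CN23XX_PKT_INPUT_CTL_PF_NUM_MASK));
	printf("vf_num       = %u\n",
	       (unsigned int)((reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
			      CN23XX_PKT_INPUT_CTL_VF_NUM_MASK));
	printf("rings_per_vf = %u\n",
	       (unsigned int)((reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS) &
			      CN23XX_PKT_INPUT_CTL_RPVF_MASK));
	return 0;
}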
diff --git a/drivers/net/liquidio/base/lio_23xx_vf.h b/drivers/net/liquidio/base/lio_23xx_vf.h
new file mode 100644
index 00000000..ad8db0df
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_23xx_vf.h
@@ -0,0 +1,97 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_23XX_VF_H_
+#define _LIO_23XX_VF_H_
+
+#include <stdio.h>
+
+#include "lio_struct.h"
+
+static const struct lio_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN23XX_MAX_IQ_DESCRIPTORS * CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .info_ptr = OCTEON_OQ_INFOPTR_MODE,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ },
+
+ .num_nic_ports = CN23XX_DEFAULT_NUM_PORTS,
+ .num_def_rx_descs = CN23XX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+};
+
+static inline const struct lio_config *
+lio_get_conf(struct lio_device *lio_dev)
+{
+ const struct lio_config *default_lio_conf = NULL;
+
+ /* check the LIO Device model & return the corresponding lio
+ * configuration
+ */
+ default_lio_conf = &default_cn23xx_conf;
+
+ if (default_lio_conf == NULL) {
+ lio_dev_err(lio_dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return default_lio_conf;
+}
+
+/** Turns off the input and output queues for the device
+ * @param lio_dev device whose I/O queues are to be disabled
+ */
+int cn23xx_vf_set_io_queues_off(struct lio_device *lio_dev);
+
+#define CN23XX_VF_BUSY_READING_REG_LOOP_COUNT 100000
+
+void cn23xx_vf_ask_pf_to_do_flr(struct lio_device *lio_dev);
+
+int cn23xx_pfvf_handshake(struct lio_device *lio_dev);
+
+int cn23xx_vf_setup_device(struct lio_device *lio_dev);
+
+void cn23xx_vf_handle_mbox(struct lio_device *lio_dev);
+#endif /* _LIO_23XX_VF_H_ */
diff --git a/drivers/net/liquidio/base/lio_hw_defs.h b/drivers/net/liquidio/base/lio_hw_defs.h
new file mode 100644
index 00000000..67eaa452
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_hw_defs.h
@@ -0,0 +1,249 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_HW_DEFS_H_
+#define _LIO_HW_DEFS_H_
+
+#include <rte_io.h>
+
+#ifndef PCI_VENDOR_ID_CAVIUM
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#endif
+
+#define LIO_CN23XX_VF_VID 0x9712
+
+/* --------------------------CONFIG VALUES------------------------ */
+
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_RINGS_PER_PF 64
+#define CN23XX_MAX_RINGS_PER_VF 8
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 512
+#define CN23XX_MIN_IQ_DESCRIPTORS 128
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 512
+#define CN23XX_MIN_OQ_DESCRIPTORS 128
+#define CN23XX_OQ_BUF_SIZE 1536
+
+#define CN23XX_OQ_REFIL_THRESHOLD 16
+
+#define CN23XX_DEFAULT_NUM_PORTS 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+
+/* common OCTEON configuration macros */
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_OQ_INFOPTR_MODE 1
+
+/* Max IOQs per LIO Link */
+#define LIO_MAX_IOQS_PER_IF 64
+
+enum lio_card_type {
+ LIO_23XX /* 23xx */
+};
+
+#define LIO_23XX_NAME "23xx"
+
+#define LIO_DEV_RUNNING 0xc
+
+#define LIO_OQ_REFILL_THRESHOLD_CFG(cfg) \
+ ((cfg)->default_config->oq.refill_threshold)
+#define LIO_NUM_DEF_TX_DESCS_CFG(cfg) \
+ ((cfg)->default_config->num_def_tx_descs)
+
+#define LIO_IQ_INSTR_TYPE(cfg) ((cfg)->default_config->iq.instr_type)
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum number of Instruction queues */
+#define LIO_MAX_INSTR_QUEUES(lio_dev) CN23XX_MAX_RINGS_PER_VF
+
+#define LIO_MAX_POSSIBLE_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define LIO_MAX_POSSIBLE_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
+
+#define LIO_DEVICE_NAME_LEN 32
+#define LIO_BASE_MAJOR_VERSION 1
+#define LIO_BASE_MINOR_VERSION 5
+#define LIO_BASE_MICRO_VERSION 1
+
+#define LIO_FW_VERSION_LENGTH 32
+
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ OCTEON_ORDERED_TAG = 0,
+ OCTEON_ATOMIC_TAG = 1,
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* used for NIC operations */
+#define LIO_OPCODE 1
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique for a given subsystem.
+ */
+#define LIO_OPCODE_SUBCODE(op, sub) \
+ ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** LIO_OPCODE subcodes */
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define LIO_OPCODE_NW_DATA 0x02 /* network packet data */
+#define LIO_OPCODE_CMD 0x03
+#define LIO_OPCODE_INFO 0x04
+#define LIO_OPCODE_PORT_STATS 0x05
+#define LIO_OPCODE_IF_CFG 0x09
+
+#define LIO_MIN_RX_BUF_SIZE 64
+#define LIO_MAX_RX_PKTLEN (64 * 1024)
+
+/* NIC Command types */
+#define LIO_CMD_CHANGE_DEVFLAGS 0x3
+#define LIO_CMD_RX_CTL 0x4
+#define LIO_CMD_CLEAR_STATS 0x6
+#define LIO_CMD_SET_RSS 0xD
+#define LIO_CMD_TNL_RX_CSUM_CTL 0x10
+#define LIO_CMD_TNL_TX_CSUM_CTL 0x11
+#define LIO_CMD_ADD_VLAN_FILTER 0x17
+#define LIO_CMD_DEL_VLAN_FILTER 0x18
+#define LIO_CMD_VXLAN_PORT_CONFIG 0x19
+
+#define LIO_CMD_VXLAN_PORT_ADD 0x0
+#define LIO_CMD_VXLAN_PORT_DEL 0x1
+#define LIO_CMD_RXCSUM_ENABLE 0x0
+#define LIO_CMD_TXCSUM_ENABLE 0x0
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define LIO_L4_CSUM_VERIFIED 0x1
+#define LIO_IP_CSUM_VERIFIED 0x2
+
+/* RSS */
+#define LIO_RSS_PARAM_DISABLE_RSS 0x10
+#define LIO_RSS_PARAM_HASH_KEY_UNCHANGED 0x08
+#define LIO_RSS_PARAM_ITABLE_UNCHANGED 0x04
+#define LIO_RSS_PARAM_HASH_INFO_UNCHANGED 0x02
+
+#define LIO_RSS_HASH_IPV4 0x100
+#define LIO_RSS_HASH_TCP_IPV4 0x200
+#define LIO_RSS_HASH_IPV6 0x400
+#define LIO_RSS_HASH_TCP_IPV6 0x1000
+#define LIO_RSS_HASH_IPV6_EX 0x800
+#define LIO_RSS_HASH_TCP_IPV6_EX 0x2000
+
+#define LIO_RSS_OFFLOAD_ALL ( \
+ LIO_RSS_HASH_IPV4 | \
+ LIO_RSS_HASH_TCP_IPV4 | \
+ LIO_RSS_HASH_IPV6 | \
+ LIO_RSS_HASH_TCP_IPV6 | \
+ LIO_RSS_HASH_IPV6_EX | \
+ LIO_RSS_HASH_TCP_IPV6_EX)
+
+#define LIO_RSS_MAX_TABLE_SZ 128
+#define LIO_RSS_MAX_KEY_SZ 40
+#define LIO_RSS_PARAM_SIZE 16
+
+/* Interface flags communicated between host driver and core app. */
+enum lio_ifflags {
+ LIO_IFFLAG_ALLMULTI = 0x02,
+ LIO_IFFLAG_UNICAST = 0x10
+};
+
+/* Routines for reading and writing CSRs */
+#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
+#define lio_write_csr(lio_dev, reg_off, value) \
+ do { \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ typeof(value) _value = value; \
+ PMD_REGS_LOG(_dev, \
+ "Write32: Reg: 0x%08lx Val: 0x%08lx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long)_value); \
+ rte_write32(_value, _dev->hw_addr + _reg_off); \
+ } while (0)
+
+#define lio_write_csr64(lio_dev, reg_off, val64) \
+ do { \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ typeof(val64) _val64 = val64; \
+ PMD_REGS_LOG( \
+ _dev, \
+ "Write64: Reg: 0x%08lx Val: 0x%016llx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long long)_val64); \
+ rte_write64(_val64, _dev->hw_addr + _reg_off); \
+ } while (0)
+
+#define lio_read_csr(lio_dev, reg_off) \
+ ({ \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ uint32_t val = rte_read32(_dev->hw_addr + _reg_off); \
+ PMD_REGS_LOG(_dev, \
+ "Read32: Reg: 0x%08lx Val: 0x%08lx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long)val); \
+ val; \
+ })
+
+#define lio_read_csr64(lio_dev, reg_off) \
+ ({ \
+ typeof(lio_dev) _dev = lio_dev; \
+ typeof(reg_off) _reg_off = reg_off; \
+ uint64_t val64 = rte_read64(_dev->hw_addr + _reg_off); \
+ PMD_REGS_LOG( \
+ _dev, \
+ "Read64: Reg: 0x%08lx Val: 0x%016llx\n", \
+ (unsigned long)_reg_off, \
+ (unsigned long long)val64); \
+ val64; \
+ })
+#else
+#define lio_write_csr(lio_dev, reg_off, value) \
+ rte_write32(value, (lio_dev)->hw_addr + (reg_off))
+
+#define lio_write_csr64(lio_dev, reg_off, val64) \
+ rte_write64(val64, (lio_dev)->hw_addr + (reg_off))
+
+#define lio_read_csr(lio_dev, reg_off) \
+ rte_read32((lio_dev)->hw_addr + (reg_off))
+
+#define lio_read_csr64(lio_dev, reg_off) \
+ rte_read64((lio_dev)->hw_addr + (reg_off))
+#endif
+#endif /* _LIO_HW_DEFS_H_ */
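
LIO_OPCODE_SUBCODE() packs the 4-bit opcode into bits 11:8 and the 7-bit subcode into the low bits. A standalone sketch (not part of the patch itself) showing the value produced for the command opcode used by this driver:

#include <stdio.h>

#define LIO_OPCODE	1
#define LIO_OPCODE_CMD	0x03
#define LIO_OPCODE_SUBCODE(op, sub)	\
	((((op) & 0x0f) << 8) | ((sub) & 0x7f))

int main(void)
{
	/* (1 << 8) | 0x03 == 0x103 */
	printf("LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_CMD) = 0x%x\n",
	       LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_CMD));
	return 0;
}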
diff --git a/drivers/net/liquidio/base/lio_mbox.c b/drivers/net/liquidio/base/lio_mbox.c
new file mode 100644
index 00000000..b4abc623
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_mbox.c
@@ -0,0 +1,275 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+
+#include "lio_logs.h"
+#include "lio_struct.h"
+#include "lio_mbox.h"
+
+/**
+ * lio_mbox_read:
+ * @mbox: Pointer to the mailbox
+ *
+ * Reads 8 bytes of data from the mbox register and
+ * writes back the acknowledgment indicating completion of the read.
+ */
+int
+lio_mbox_read(struct lio_mbox *mbox)
+{
+ union lio_mbox_message msg;
+ int ret = 0;
+
+ msg.mbox_msg64 = rte_read64(mbox->mbox_read_reg);
+
+ if ((msg.mbox_msg64 == LIO_PFVFACK) || (msg.mbox_msg64 == LIO_PFVFSIG))
+ return 0;
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
+ mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] =
+ msg.mbox_msg64;
+ mbox->mbox_req.recv_len++;
+ } else {
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
+ mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =
+ msg.mbox_msg64;
+ mbox->mbox_resp.recv_len++;
+ } else {
+ if ((mbox->state & LIO_MBOX_STATE_IDLE) &&
+ (msg.s.type == LIO_MBOX_REQUEST)) {
+ mbox->state &= ~LIO_MBOX_STATE_IDLE;
+ mbox->state |= LIO_MBOX_STATE_REQ_RECEIVING;
+ mbox->mbox_req.msg.mbox_msg64 = msg.mbox_msg64;
+ mbox->mbox_req.q_no = mbox->q_no;
+ mbox->mbox_req.recv_len = 1;
+ } else {
+ if ((mbox->state &
+ LIO_MBOX_STATE_RES_PENDING) &&
+ (msg.s.type == LIO_MBOX_RESPONSE)) {
+ mbox->state &=
+ ~LIO_MBOX_STATE_RES_PENDING;
+ mbox->state |=
+ LIO_MBOX_STATE_RES_RECEIVING;
+ mbox->mbox_resp.msg.mbox_msg64 =
+ msg.mbox_msg64;
+ mbox->mbox_resp.q_no = mbox->q_no;
+ mbox->mbox_resp.recv_len = 1;
+ } else {
+ rte_write64(LIO_PFVFERR,
+ mbox->mbox_read_reg);
+ mbox->state |= LIO_MBOX_STATE_ERROR;
+ return -1;
+ }
+ }
+ }
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVING) {
+ if (mbox->mbox_req.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVING;
+ mbox->state |= LIO_MBOX_STATE_REQ_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVING) {
+ if (mbox->mbox_resp.recv_len < msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~LIO_MBOX_STATE_RES_RECEIVING;
+ mbox->state |= LIO_MBOX_STATE_RES_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ RTE_ASSERT(0);
+ }
+ }
+
+ rte_write64(LIO_PFVFACK, mbox->mbox_read_reg);
+
+ return ret;
+}
+
+/**
+ * lio_mbox_write:
+ * @lio_dev: Pointer to the lio device
+ * @mbox_cmd: Cmd to send to the mailbox.
+ *
+ * Populates the queue-specific mbox structure
+ * with the cmd information and writes the cmd
+ * to the mbox register.
+ */
+int
+lio_mbox_write(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *mbox_cmd)
+{
+ struct lio_mbox *mbox = lio_dev->mbox[mbox_cmd->q_no];
+ uint32_t count, i, ret = LIO_MBOX_STATUS_SUCCESS;
+
+ if ((mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) &&
+ !(mbox->state & LIO_MBOX_STATE_REQ_RECEIVED))
+ return LIO_MBOX_STATUS_FAILED;
+
+ if ((mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) &&
+ !(mbox->state & LIO_MBOX_STATE_IDLE))
+ return LIO_MBOX_STATUS_BUSY;
+
+ if (mbox_cmd->msg.s.type == LIO_MBOX_REQUEST) {
+ rte_memcpy(&mbox->mbox_resp, mbox_cmd,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_RES_PENDING;
+ }
+
+ count = 0;
+
+ while (rte_read64(mbox->mbox_write_reg) != LIO_PFVFSIG) {
+ rte_delay_ms(1);
+ if (count++ == 1000) {
+ ret = LIO_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+
+ if (ret == LIO_MBOX_STATUS_SUCCESS) {
+ rte_write64(mbox_cmd->msg.mbox_msg64, mbox->mbox_write_reg);
+ for (i = 0; i < (uint32_t)(mbox_cmd->msg.s.len - 1); i++) {
+ count = 0;
+ while (rte_read64(mbox->mbox_write_reg) !=
+ LIO_PFVFACK) {
+ rte_delay_ms(1);
+ if (count++ == 1000) {
+ ret = LIO_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+ rte_write64(mbox_cmd->data[i], mbox->mbox_write_reg);
+ }
+ }
+
+ if (mbox_cmd->msg.s.type == LIO_MBOX_RESPONSE) {
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ } else {
+ if ((!mbox_cmd->msg.s.resp_needed) ||
+ (ret == LIO_MBOX_STATUS_FAILED)) {
+ mbox->state &= ~LIO_MBOX_STATE_RES_PENDING;
+ if (!(mbox->state & (LIO_MBOX_STATE_REQ_RECEIVING |
+ LIO_MBOX_STATE_REQ_RECEIVED)))
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * lio_mbox_process_cmd:
+ * @mbox: Pointer to the mailbox
+ * @mbox_cmd: Pointer to the command received
+ *
+ * Processes the cmd received in the mbox.
+ */
+static int
+lio_mbox_process_cmd(struct lio_mbox *mbox,
+ struct lio_mbox_cmd *mbox_cmd)
+{
+ struct lio_device *lio_dev = mbox->lio_dev;
+
+ if (mbox_cmd->msg.s.cmd == LIO_CORES_CRASHED)
+ lio_dev_err(lio_dev, "Octeon core(s) crashed or got stuck!\n");
+
+ return 0;
+}
+
+/**
+ * Process the received mbox message.
+ */
+int
+lio_mbox_process_message(struct lio_mbox *mbox)
+{
+ struct lio_mbox_cmd mbox_cmd;
+
+ if (mbox->state & LIO_MBOX_STATE_ERROR) {
+ if (mbox->state & (LIO_MBOX_STATE_RES_PENDING |
+ LIO_MBOX_STATE_RES_RECEIVING)) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ mbox_cmd.recv_status = 1;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->lio_dev, &mbox_cmd,
+ mbox_cmd.fn_arg);
+
+ return 0;
+ }
+
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_RES_RECEIVED) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct lio_mbox_cmd));
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ mbox_cmd.recv_status = 0;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->lio_dev, &mbox_cmd, mbox_cmd.fn_arg);
+
+ return 0;
+ }
+
+ if (mbox->state & LIO_MBOX_STATE_REQ_RECEIVED) {
+ rte_memcpy(&mbox_cmd, &mbox->mbox_req,
+ sizeof(struct lio_mbox_cmd));
+ if (!mbox_cmd.msg.s.resp_needed) {
+ mbox->state &= ~LIO_MBOX_STATE_REQ_RECEIVED;
+ if (!(mbox->state & LIO_MBOX_STATE_RES_PENDING))
+ mbox->state = LIO_MBOX_STATE_IDLE;
+ rte_write64(LIO_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ lio_mbox_process_cmd(mbox, &mbox_cmd);
+
+ return 0;
+ }
+
+ RTE_ASSERT(0);
+
+ return 0;
+}
diff --git a/drivers/net/liquidio/base/lio_mbox.h b/drivers/net/liquidio/base/lio_mbox.h
new file mode 100644
index 00000000..b0875d64
--- /dev/null
+++ b/drivers/net/liquidio/base/lio_mbox.h
@@ -0,0 +1,131 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_MBOX_H_
+#define _LIO_MBOX_H_
+
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+
+/* Macros for Mail Box Communication */
+
+#define LIO_MBOX_DATA_MAX 32
+
+#define LIO_VF_ACTIVE 0x1
+#define LIO_VF_FLR_REQUEST 0x2
+#define LIO_CORES_CRASHED 0x3
+
+/* Mailbox acknowledgment, signature and error values */
+#define LIO_PFVFACK 0xffffffffffffffff
+#define LIO_PFVFSIG 0x1122334455667788
+#define LIO_PFVFERR 0xDEADDEADDEADDEAD
+
+enum lio_mbox_cmd_status {
+ LIO_MBOX_STATUS_SUCCESS = 0,
+ LIO_MBOX_STATUS_FAILED = 1,
+ LIO_MBOX_STATUS_BUSY = 2
+};
+
+enum lio_mbox_message_type {
+ LIO_MBOX_REQUEST = 0,
+ LIO_MBOX_RESPONSE = 1
+};
+
+union lio_mbox_message {
+ uint64_t mbox_msg64;
+ struct {
+ uint16_t type : 1;
+ uint16_t resp_needed : 1;
+ uint16_t cmd : 6;
+ uint16_t len : 8;
+ uint8_t params[6];
+ } s;
+};
+
+typedef void (*lio_mbox_callback)(void *, void *, void *);
+
+struct lio_mbox_cmd {
+ union lio_mbox_message msg;
+ uint64_t data[LIO_MBOX_DATA_MAX];
+ uint32_t q_no;
+ uint32_t recv_len;
+ uint32_t recv_status;
+ lio_mbox_callback fn;
+ void *fn_arg;
+};
+
+enum lio_mbox_state {
+ LIO_MBOX_STATE_IDLE = 1,
+ LIO_MBOX_STATE_REQ_RECEIVING = 2,
+ LIO_MBOX_STATE_REQ_RECEIVED = 4,
+ LIO_MBOX_STATE_RES_PENDING = 8,
+ LIO_MBOX_STATE_RES_RECEIVING = 16,
+ LIO_MBOX_STATE_RES_RECEIVED = 16,
+ LIO_MBOX_STATE_ERROR = 32
+};
+
+struct lio_mbox {
+ /* A spinlock to protect access to this q_mbox. */
+ rte_spinlock_t lock;
+
+ struct lio_device *lio_dev;
+
+ uint32_t q_no;
+
+ enum lio_mbox_state state;
+
+ /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ void *mbox_int_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ void *mbox_write_reg;
+
+ /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF,
+ * SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ void *mbox_read_reg;
+
+ struct lio_mbox_cmd mbox_req;
+
+ struct lio_mbox_cmd mbox_resp;
+
+};
+
+int lio_mbox_read(struct lio_mbox *mbox);
+int lio_mbox_write(struct lio_device *lio_dev,
+ struct lio_mbox_cmd *mbox_cmd);
+int lio_mbox_process_message(struct lio_mbox *mbox);
+#endif /* _LIO_MBOX_H_ */
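
A mailbox transaction always starts with one 64-bit message word built through union lio_mbox_message. The standalone sketch below (not part of the patch itself) mirrors the LIO_VF_ACTIVE request assembled in cn23xx_pfvf_handshake(); the exact bit layout of mbox_msg64 is whatever the compiler assigns to these bitfields, which is also what the driver itself relies on.

#include <stdint.h>
#include <stdio.h>

union lio_mbox_message {
	uint64_t mbox_msg64;
	struct {
		uint16_t type : 1;
		uint16_t resp_needed : 1;
		uint16_t cmd : 6;
		uint16_t len : 8;
		uint8_t params[6];
	} s;
};

int main(void)
{
	union lio_mbox_message msg;

	msg.mbox_msg64 = 0;
	msg.s.type = 0;		/* LIO_MBOX_REQUEST */
	msg.s.resp_needed = 1;	/* PF must answer */
	msg.s.cmd = 0x1;	/* LIO_VF_ACTIVE */
	msg.s.len = 2;		/* message word plus one data word */

	printf("request word written to the SIG1 reg: 0x%016llx\n",
	       (unsigned long long)msg.mbox_msg64);
	return 0;
}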
diff --git a/drivers/net/liquidio/lio_ethdev.c b/drivers/net/liquidio/lio_ethdev.c
new file mode 100644
index 00000000..436d25b0
--- /dev/null
+++ b/drivers/net/liquidio/lio_ethdev.c
@@ -0,0 +1,2058 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+#include <rte_alarm.h>
+
+#include "lio_logs.h"
+#include "lio_23xx_vf.h"
+#include "lio_ethdev.h"
+#include "lio_rxtx.h"
+
+/* Default RSS key in use */
+static uint8_t lio_rss_key[40] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static const struct rte_eth_desc_lim lio_rx_desc_lim = {
+ .nb_max = CN23XX_MAX_OQ_DESCRIPTORS,
+ .nb_min = CN23XX_MIN_OQ_DESCRIPTORS,
+ .nb_align = 1,
+};
+
+static const struct rte_eth_desc_lim lio_tx_desc_lim = {
+ .nb_max = CN23XX_MAX_IQ_DESCRIPTORS,
+ .nb_min = CN23XX_MIN_IQ_DESCRIPTORS,
+ .nb_align = 1,
+};
+
+/* Wait for the control command to reach the NIC. */
+static uint16_t
+lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
+ struct lio_dev_ctrl_cmd *ctrl_cmd)
+{
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+
+ while ((ctrl_cmd->cond == 0) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+ rte_delay_ms(1);
+ }
+
+ return !timeout;
+}
+
+/**
+ * \brief Send Rx control command
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ * @param start_stop whether to start or stop
+ */
+static int
+lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
+ ctrl_pkt.ncmd.s.param1 = start_stop;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send RX Control message\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "RX Control command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* store statistics names and its offset in stats structure */
+struct rte_lio_xstats_name_off {
+ char name[RTE_ETH_XSTATS_NAME_SIZE];
+ unsigned int offset;
+};
+
+static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
+ {"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
+ {"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
+ {"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
+ {"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
+ {"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
+ {"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
+ {"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
+ {"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
+ {"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
+ {"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
+ {"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
+ {"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
+ {"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
+ {"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_broadcast_pkts",
+ (offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_multicast_pkts",
+ (offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_total_collisions", (offsetof(struct octeon_tx_stats,
+ total_collisions)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
+ sizeof(struct octeon_rx_stats)},
+ {"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
+ sizeof(struct octeon_rx_stats)},
+};
+
+#define LIO_NB_XSTATS RTE_DIM(rte_lio_stats_strings)
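
The tx entries in rte_lio_stats_strings[] add sizeof(struct octeon_rx_stats) to their offsetof() because the firmware returns the rx and tx counter blocks back to back, and lio_dev_xstats_get() below indexes the combined block with a single byte offset. A standalone sketch (not part of the patch itself), using hypothetical stand-in structs in place of the real octeon_rx_stats/octeon_tx_stats:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rx_stats { uint64_t total_rcvd; uint64_t bytes_rcvd; };
struct tx_stats { uint64_t total_pkts_sent; uint64_t total_bytes_sent; };
struct link_stats { struct rx_stats fromwire; struct tx_stats fromhost; };

int main(void)
{
	struct link_stats hw = {
		.fromwire = { .total_rcvd = 10, .bytes_rcvd = 640 },
		.fromhost = { .total_pkts_sent = 7, .total_bytes_sent = 448 },
	};
	/* Offset of a tx counter within the combined block, built the same
	 * way as the "tx_pkts" entry in the table above. */
	size_t off = offsetof(struct tx_stats, total_pkts_sent) +
		     sizeof(struct rx_stats);
	uint64_t val = *(uint64_t *)((char *)&hw + off);

	printf("tx_pkts = %llu\n", (unsigned long long)val);	/* prints 7 */
	return 0;
}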
+
+/* Get hw stats of the port */
+static int
+lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
+ unsigned int n)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct octeon_link_stats *hw_stats;
+ struct lio_link_stats_resp *resp;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+ unsigned int i;
+ int retval;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (n < LIO_NB_XSTATS)
+ return LIO_NB_XSTATS;
+
+ resp_size = sizeof(struct lio_link_stats_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_link_stats_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_PORT_STATS, 0, 0, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
+ retval);
+ goto get_stats_fail;
+ }
+
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
+ goto get_stats_fail;
+ }
+
+ lio_swap_8B_data((uint64_t *)(&resp->link_stats),
+ sizeof(struct octeon_link_stats) >> 3);
+
+ hw_stats = &resp->link_stats;
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ xstats[i].id = i;
+ xstats[i].value =
+ *(uint64_t *)(((char *)hw_stats) +
+ rte_lio_stats_strings[i].offset);
+ }
+
+ lio_free_soft_command(sc);
+
+ return LIO_NB_XSTATS;
+
+get_stats_fail:
+ lio_free_soft_command(sc);
+
+ return -1;
+}
+
+static int
+lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned limit __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ unsigned int i;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (xstats_names == NULL)
+ return LIO_NB_XSTATS;
+
+ /* Note: limit checked in rte_eth_xstats_names() */
+
+ for (i = 0; i < LIO_NB_XSTATS; i++) {
+ snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
+ "%s", rte_lio_stats_strings[i].name);
+ }
+
+ return LIO_NB_XSTATS;
+}
+
+/* Reset hw stats for the port */
+static void
+lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send clear stats command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Clear stats command timed out\n");
+ return;
+ }
+
+ /* clear stored per queue stats */
+ RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
+ (*eth_dev->dev_ops->stats_reset)(eth_dev);
+}
+
+/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
+static void
+lio_dev_stats_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_stats *stats)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+ uint64_t bytes = 0;
+ uint64_t pkts = 0;
+ uint64_t drop = 0;
+
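+ /* Accumulate per instruction-queue (TX) counters across all TX queues */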
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+ }
+
+ stats->opackets = pkts;
+ stats->obytes = bytes;
+ stats->oerrors = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
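+ /* Accumulate per output-queue (RX) counters across all RX queues */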
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+ }
+ stats->ibytes = bytes;
+ stats->ipackets = pkts;
+ stats->ierrors = drop;
+}
+
+static void
+lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_droq_stats *oq_stats;
+ struct lio_iq_stats *iq_stats;
+ struct lio_instr_queue *txq;
+ struct lio_droq *droq;
+ int i, iq_no, oq_no;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ iq_no = lio_dev->linfo.txpciq[i].s.q_no;
+ txq = lio_dev->instr_queue[iq_no];
+ if (txq != NULL) {
+ iq_stats = &txq->stats;
+ memset(iq_stats, 0, sizeof(struct lio_iq_stats));
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
+ droq = lio_dev->droq[oq_no];
+ if (droq != NULL) {
+ oq_stats = &droq->stats;
+ memset(oq_stats, 0, sizeof(struct lio_droq_stats));
+ }
+ }
+}
+
+static void
+lio_dev_info_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_dev_info *devinfo)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ devinfo->max_rx_queues = lio_dev->max_rx_queues;
+ devinfo->max_tx_queues = lio_dev->max_tx_queues;
+
+ devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
+ devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;
+
+ devinfo->max_mac_addrs = 1;
+
+ devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_VLAN_STRIP);
+ devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);
+
+ devinfo->rx_desc_lim = lio_rx_desc_lim;
+ devinfo->tx_desc_lim = lio_tx_desc_lim;
+
+ devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
+ devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_TCP |
+ ETH_RSS_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_TCP |
+ ETH_RSS_IPV6_EX |
+ ETH_RSS_IPV6_TCP_EX);
+}
+
+static int
+lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* Limit the MTU so Ethernet packets stay between
+ * ETHER_MIN_MTU bytes and the PF's MTU
+ */
+ if ((new_mtu < ETHER_MIN_MTU) ||
+ (new_mtu > lio_dev->linfo.link.s.mtu)) {
+ lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
+ lio_dev_err(lio_dev, "Valid range %d and %d\n",
+ ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+ int i, j, index;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
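+ /* Set all parameter flags, then clear LIO_RSS_PARAM_ITABLE_UNCHANGED
+ * so the firmware applies the new indirection table.
+ */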
+ rss_param->param.flags = 0xF;
+ rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
+ rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;
+
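+ /* Copy only the entries whose bit is set in each 64-entry group mask */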
+ for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
+ for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
+ if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
+ index = (i * RTE_RETA_GROUP_SIZE) + j;
+ rss_state->itable[index] = reta_conf[i].reta[j];
+ }
+ }
+ }
+
+ rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
+ memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ int i, num;
+
+ if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
+ lio_dev_err(lio_dev,
+ "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n",
+ reta_size, LIO_RSS_MAX_TABLE_SZ);
+ return -EINVAL;
+ }
+
+ num = reta_size / RTE_RETA_GROUP_SIZE;
+
+ for (i = 0; i < num; i++) {
+ memcpy(reta_conf->reta,
+ &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
+ RTE_RETA_GROUP_SIZE);
+ reta_conf++;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ uint8_t *hash_key = NULL;
+ uint64_t rss_hf = 0;
+
+ if (rss_state->hash_disable) {
+ lio_dev_info(lio_dev, "RSS disabled in nic\n");
+ rss_conf->rss_hf = 0;
+ return 0;
+ }
+
+ /* Get key value */
+ hash_key = rss_conf->rss_key;
+ if (hash_key != NULL)
+ memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);
+
+ if (rss_state->ip)
+ rss_hf |= ETH_RSS_IPV4;
+ if (rss_state->tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ if (rss_state->ipv6)
+ rss_hf |= ETH_RSS_IPV6;
+ if (rss_state->ipv6_tcp_hash)
+ rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+ if (rss_state->ipv6_ex)
+ rss_hf |= ETH_RSS_IPV6_EX;
+ if (rss_state->ipv6_tcp_ex_hash)
+ rss_hf |= ETH_RSS_IPV6_TCP_EX;
+
+ rss_conf->rss_hf = rss_hf;
+
+ return 0;
+}
+
+static int
+lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct lio_rss_set *rss_param;
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
+ lio_dev->port_id);
+ return -EINVAL;
+ }
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
+ ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
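+ /* Set all parameter flags; the UNCHANGED bits are cleared below for
+ * each field this request updates.
+ */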
+ rss_param->param.flags = 0xF;
+
+ if (rss_conf->rss_key) {
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
+ rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
+ rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
+ memcpy(rss_state->hash_key, rss_conf->rss_key,
+ rss_state->hash_key_size);
+ memcpy(rss_param->key, rss_state->hash_key,
+ rss_state->hash_key_size);
+ }
+
+ if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ /* Can't disable rss through hash flags,
+ * if it is enabled by default during init
+ */
+ if (!rss_state->hash_disable)
+ return -EINVAL;
+
+ /* This is for --disable-rss during testpmd launch */
+ rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
+ } else {
+ uint32_t hashinfo = 0;
+
+ /* Can't enable rss if disabled by default during init */
+ if (rss_state->hash_disable)
+ return -EINVAL;
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV4) {
+ hashinfo |= LIO_RSS_HASH_IPV4;
+ rss_state->ip = 1;
+ } else {
+ rss_state->ip = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV4;
+ rss_state->tcp_hash = 1;
+ } else {
+ rss_state->tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6) {
+ hashinfo |= LIO_RSS_HASH_IPV6;
+ rss_state->ipv6 = 1;
+ } else {
+ rss_state->ipv6 = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6;
+ rss_state->ipv6_tcp_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_hash = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
+ hashinfo |= LIO_RSS_HASH_IPV6_EX;
+ rss_state->ipv6_ex = 1;
+ } else {
+ rss_state->ipv6_ex = 0;
+ }
+
+ if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
+ hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
+ rss_state->ipv6_tcp_ex_hash = 1;
+ } else {
+ rss_state->ipv6_tcp_ex_hash = 0;
+ }
+
+ rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
+ rss_param->param.hashinfo = hashinfo;
+ }
+
+ lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to set rss hash\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Set rss hash timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Add VXLAN destination UDP port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * UDP tunnel configuration
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Remove VXLAN destination UDP port for an interface.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param udp_tnl
+ * UDP tunnel configuration
+ *
+ * @return
+ * On success return 0
+ * On failure return -1
+ */
+static int
+lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *udp_tnl)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (udp_tnl == NULL)
+ return -EINVAL;
+
+ if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
+ lio_dev_err(lio_dev, "Unsupported tunnel type\n");
+ return -1;
+ }
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
+ ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
+ ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ if (lio_dev->linfo.vlan_is_admin_assigned)
+ return -EPERM;
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = on ?
+ LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
+ ctrl_pkt.ncmd.s.param1 = vlan_id;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
+ lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
+ on ? "add" : "remove");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param eth_dev
+ *   Pointer to the structure rte_eth_dev to write the link status into.
+ * @param link
+ *   Pointer to the new link status.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, negative value.
+ */
+static inline int
+lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
+ struct rte_eth_link *link)
+{
+ struct rte_eth_link *dst = &eth_dev->data->dev_link;
+ struct rte_eth_link *src = link;
+
+ if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+ *(uint64_t *)src) == 0)
+ return -1;
+
+ return 0;
+}
+
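+/* SWAR population count: return the number of bits set in a 64-bit word.
+ * Used to count the queues enabled in the firmware iq/oq masks.
+ */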
+static uint64_t
+lio_hweight64(uint64_t w)
+{
+ uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);
+
+ res =
+ (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
+ res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
+ res = res + (res >> 8);
+ res = res + (res >> 16);
+
+ return (res + (res >> 32)) & 0x00000000000000FFul;
+}
+
+static int
+lio_dev_link_update(struct rte_eth_dev *eth_dev,
+ int wait_to_complete __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_eth_link link, old;
+
+ /* Initialize */
+ link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ memset(&old, 0, sizeof(old));
+
+ /* Return what we found */
+ if (lio_dev->linfo.link.s.link_up == 0) {
+ /* Interface is down */
+ if (lio_dev_atomic_write_link_status(eth_dev, &link))
+ return -1;
+ if (link.link_status == old.link_status)
+ return -1;
+ return 0;
+ }
+
+ link.link_status = ETH_LINK_UP; /* Interface is up */
+ link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ switch (lio_dev->linfo.link.s.speed) {
+ case LIO_LINK_SPEED_10000:
+ link.link_speed = ETH_SPEED_NUM_10G;
+ break;
+ case LIO_LINK_SPEED_25000:
+ link.link_speed = ETH_SPEED_NUM_25G;
+ break;
+ default:
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ }
+
+ if (lio_dev_atomic_write_link_status(eth_dev, &link))
+ return -1;
+
+ if (link.link_status == old.link_status)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * \brief Enable or disable allmulticast mode for the net device
+ * @param eth_dev Pointer to the structure rte_eth_dev
+ */
+static void
+lio_change_dev_flag(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
+ ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send change flag message\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "Change dev flag command timed out\n");
+}
+
+static void
+lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
+ lio_dev->port_id);
+ return;
+ }
+
+ lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
+ lio_change_dev_flag(eth_dev);
+}
+
+static void
+lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_reta_entry64 reta_conf[8];
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t i;
+
+ /* Configure the RSS key and the RSS protocols used to compute
+ * the RSS hash of input packets.
+ */
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ return;
+ }
+
+ if (rss_conf.rss_key == NULL)
+ rss_conf.rss_key = lio_rss_key; /* Default hash key */
+
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+
+ memset(reta_conf, 0, sizeof(reta_conf));
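+ /* Distribute the RX queues round-robin across all entries of the
+ * RSS indirection table.
+ */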
+ for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
+ uint8_t q_idx, conf_idx, reta_idx;
+
+ q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
+ i % eth_dev->data->nb_rx_queues : 0);
+ conf_idx = i / RTE_RETA_GROUP_SIZE;
+ reta_idx = i % RTE_RETA_GROUP_SIZE;
+ reta_conf[conf_idx].reta[reta_idx] = q_idx;
+ reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
+ }
+
+ lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
+}
+
+static void
+lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
+ struct rte_eth_rss_conf rss_conf;
+
+ switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
+ case ETH_MQ_RX_RSS:
+ lio_dev_rss_configure(eth_dev);
+ break;
+ case ETH_MQ_RX_NONE:
+ /* if mq_mode is none, disable rss mode. */
+ default:
+ memset(&rss_conf, 0, sizeof(rss_conf));
+ rss_state->hash_disable = 1;
+ lio_dev_rss_hash_update(eth_dev, &rss_conf);
+ }
+}
+
+/**
+ * Setup our receive queue/ringbuffer. This is the
+ * queue the Octeon uses to send us packets and
+ * responses. We are given a memory pool for our
+ * packet buffers that are used to populate the receive
+ * queue.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ * @param q_no
+ * Queue number
+ * @param num_rx_descs
+ * Number of entries in the queue
+ * @param socket_id
+ * Where to allocate memory
+ * @param rx_conf
+ * Pointer to the structure rte_eth_rxconf
+ * @param mp
+ * Pointer to the packet pool
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -1
+ */
+static int
+lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_rx_descs, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ uint32_t fw_mapped_oq;
+ uint16_t buf_size;
+
+ if (q_no >= lio_dev->nb_rx_queues) {
+ lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+
+ lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);
+
+ fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;
+
+ if ((lio_dev->droq[fw_mapped_oq]) &&
+ (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
+ lio_dev_err(lio_dev,
+ "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
+ lio_dev->droq[fw_mapped_oq]->max_count);
+ return -ENOTSUP;
+ }
+
+ mbp_priv = rte_mempool_get_priv(mp);
+ buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+ if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
+ socket_id)) {
+ lio_dev_err(lio_dev, "droq allocation failed\n");
+ return -1;
+ }
+
+ eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];
+
+ return 0;
+}
+
+/**
+ * Release the receive queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param rxq
+ * Opaque pointer to the receive queue to release
+ *
+ * @return
+ * - nothing
+ */
+void
+lio_dev_rx_queue_release(void *rxq)
+{
+ struct lio_droq *droq = rxq;
+ int oq_no;
+
+ if (droq) {
+ /* Run time queue deletion not supported */
+ if (droq->lio_dev->port_configured)
+ return;
+
+ oq_no = droq->q_no;
+ lio_delete_droq_queue(droq->lio_dev, oq_no);
+ }
+}
+
+/**
+ * Allocate and initialize SW ring. Initialize associated HW registers.
+ *
+ * @param eth_dev
+ * Pointer to structure rte_eth_dev
+ *
+ * @param q_no
+ * Queue number
+ *
+ * @param num_tx_descs
+ * Number of ringbuffer descriptors
+ *
+ * @param socket_id
+ * NUMA socket id, used for memory allocations
+ *
+ * @param tx_conf
+ * Pointer to the structure rte_eth_txconf
+ *
+ * @return
+ * - On success, return 0
+ * - On failure, return -errno value
+ */
+static int
+lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+ uint16_t num_tx_descs, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
+ int retval;
+
+ if (q_no >= lio_dev->nb_tx_queues) {
+ lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
+ return -EINVAL;
+ }
+
+ lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);
+
+ if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
+ (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
+ lio_dev_err(lio_dev,
+ "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
+ lio_dev->instr_queue[fw_mapped_iq]->max_count);
+ return -ENOTSUP;
+ }
+
+ retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
+ num_tx_descs, lio_dev, socket_id);
+
+ if (retval) {
+ lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
+ return retval;
+ }
+
+ retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
+ lio_dev->instr_queue[fw_mapped_iq]->max_count,
+ socket_id);
+
+ if (retval) {
+ lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
+ return retval;
+ }
+
+ eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];
+
+ return 0;
+}
+
+/**
+ * Release the transmit queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param txq
+ * Opaque pointer to the transmit queue to release
+ *
+ * @return
+ * - nothing
+ */
+void
+lio_dev_tx_queue_release(void *txq)
+{
+ struct lio_instr_queue *tq = txq;
+ uint32_t fw_mapped_iq_no;
+
+ if (tq) {
+ /* Run time queue deletion not supported */
+ if (tq->lio_dev->port_configured)
+ return;
+
+ /* Free sg_list */
+ lio_delete_sglist(tq);
+
+ fw_mapped_iq_no = tq->txpciq.s.q_no;
+ lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
+ }
+}
+
+/**
+ * API to check link state.
+ */
+static void
+lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ struct lio_link_status_resp *resp;
+ union octeon_link_status *ls;
+ struct lio_soft_command *sc;
+ uint32_t resp_size;
+
+ if (!lio_dev->intf_open)
+ return;
+
+ resp_size = sizeof(struct lio_link_status_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return;
+
+ resp = (struct lio_link_status_resp *)sc->virtrptr;
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_INFO, 0, 0, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
+ goto get_status_fail;
+
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ rte_delay_ms(1);
+ }
+
+ if (resp->status)
+ goto get_status_fail;
+
+ ls = &resp->link_info.link;
+
+ lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);
+
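+ /* Update the ethdev link info only if the 64-bit status word changed */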
+ if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
+ lio_dev->linfo.link.link_status64 = ls->link_status64;
+ lio_dev_link_update(eth_dev, 0);
+ }
+
+ lio_free_soft_command(sc);
+
+ return;
+
+get_status_fail:
+ lio_free_soft_command(sc);
+}
+
+/* This function will be invoked every LIO_LSC_TIMEOUT us (100ms)
+ * and will update link state if it changes.
+ */
+static void
+lio_sync_link_state_check(void *eth_dev)
+{
+ struct lio_device *lio_dev =
+ (((struct rte_eth_dev *)eth_dev)->data->dev_private);
+
+ if (lio_dev->port_configured)
+ lio_dev_get_link_status(eth_dev);
+
+ /* Schedule periodic link status check.
+ * Stop the check when the interface is closed and start it again on open.
+ */
+ if (lio_dev->intf_open)
+ rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
+ eth_dev);
+}
+
+static int
+lio_dev_start(struct rte_eth_dev *eth_dev)
+{
+ uint16_t mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ int ret = 0;
+
+ lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ return -1;
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1))
+ return -1;
+
+ /* Ready for link status updates */
+ lio_dev->intf_open = 1;
+ rte_mb();
+
+ /* Configure RSS if device configured with multiple RX queues. */
+ lio_dev_mq_rx_configure(eth_dev);
+
+ /* start polling for lsc */
+ ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
+ lio_sync_link_state_check,
+ eth_dev);
+ if (ret) {
+ lio_dev_err(lio_dev,
+ "link state check handler creation failed\n");
+ goto dev_lsc_handle_error;
+ }
+
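+ /* Wait up to LIO_MAX_CMD_TIMEOUT ms for firmware to report link state */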
+ while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
+ rte_delay_ms(1);
+
+ if (lio_dev->linfo.link.link_status64 == 0) {
+ ret = -1;
+ goto dev_mtu_check_error;
+ }
+
+ if (lio_dev->linfo.link.s.mtu != mtu) {
+ ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
+ if (ret)
+ goto dev_mtu_check_error;
+ }
+
+ return 0;
+
+dev_mtu_check_error:
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+dev_lsc_handle_error:
+ lio_dev->intf_open = 0;
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ return ret;
+}
+
+/* Stop device and disable input/output functions */
+static void
+lio_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
+ lio_dev->intf_open = 0;
+ rte_mb();
+
+ /* Cancel callback if still running. */
+ rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);
+
+ lio_send_rx_ctrl_cmd(eth_dev, 0);
+
+ /* Clear recorded link status */
+ lio_dev->linfo.link.link_status64 = 0;
+}
+
+static int
+lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
+ return 0;
+ }
+
+ if (lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already UP\n");
+ return 0;
+ }
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
+ lio_dev_err(lio_dev, "Unable to set Link UP\n");
+ return -1;
+ }
+
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+
+ return 0;
+}
+
+static int
+lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ if (!lio_dev->intf_open) {
+ lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
+ return 0;
+ }
+
+ if (!lio_dev->linfo.link.s.link_up) {
+ lio_dev_info(lio_dev, "Link is already DOWN\n");
+ return 0;
+ }
+
+ lio_dev->linfo.link.s.link_up = 0;
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+
+ if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
+ lio_dev->linfo.link.s.link_up = 1;
+ eth_dev->data->dev_link.link_status = ETH_LINK_UP;
+ lio_dev_err(lio_dev, "Unable to set Link Down\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Reset and stop the device. This occurs on the first
+ * call to this routine. Subsequent calls will simply
+ * return. NB: This will require the NIC to be rebooted.
+ *
+ * @param eth_dev
+ * Pointer to the structure rte_eth_dev
+ *
+ * @return
+ * - nothing
+ */
+static void
+lio_dev_close(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint32_t i;
+
+ lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);
+
+ if (lio_dev->intf_open)
+ lio_dev_stop(eth_dev);
+
+ lio_wait_for_instr_fetch(lio_dev);
+
+ lio_dev->fn_list.disable_io_queues(lio_dev);
+
+ cn23xx_vf_set_io_queues_off(lio_dev);
+
+ /* Reset iq regs (IQ_DBELL).
+ * Clear sli_pktx_cnts (OQ_PKTS_SENT).
+ */
+ for (i = 0; i < lio_dev->nb_rx_queues; i++) {
+ struct lio_droq *droq = lio_dev->droq[i];
+
+ if (droq == NULL)
+ break;
+
+ uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ lio_dev_dbg(lio_dev,
+ "pending oq count %u\n", pkt_count);
+ rte_write32(pkt_count, droq->pkts_sent_reg);
+ }
+
+ /* Do FLR for the VF */
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+
+ /* lio_free_mbox */
+ lio_dev->fn_list.free_mbox(lio_dev);
+
+ /* Free glist resources */
+ rte_free(lio_dev->glist_head);
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_head = NULL;
+ lio_dev->glist_lock = NULL;
+
+ lio_dev->port_configured = 0;
+
+ /* Delete all queues */
+ lio_dev_clear_queues(eth_dev);
+}
+
+/**
+ * Enable tunnel rx checksum verification from firmware.
+ */
+static void
+lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
+}
+
+/**
+ * Enable checksum calculation for inner packet in a tunnel.
+ */
+static void
+lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ struct lio_dev_ctrl_cmd ctrl_cmd;
+ struct lio_ctrl_pkt ctrl_pkt;
+
+ /* flush added to prevent cmd failure
+ * in case the queue is full
+ */
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
+
+ memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
+ memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));
+
+ ctrl_cmd.eth_dev = eth_dev;
+ ctrl_cmd.cond = 0;
+
+ ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
+ ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
+ ctrl_pkt.ctrl_cmd = &ctrl_cmd;
+
+ if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
+ lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
+ return;
+ }
+
+ if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
+ lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
+}
+
+static int lio_dev_configure(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+ uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
+ int retval, num_iqueues, num_oqueues;
+ uint8_t mac[ETHER_ADDR_LEN], i;
+ struct lio_if_cfg_resp *resp;
+ struct lio_soft_command *sc;
+ union lio_if_cfg if_cfg;
+ uint32_t resp_size;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Re-configuring firmware is not supported; the number of tx/rx
+ * queues per port cannot be changed from its initial value.
+ */
+ if (lio_dev->port_configured) {
+ if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
+ (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
+ lio_dev_err(lio_dev,
+ "rxq/txq re-conf not supported. Restart application with new value.\n");
+ return -ENOTSUP;
+ }
+ return 0;
+ }
+
+ lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
+ lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;
+
+ resp_size = sizeof(struct lio_if_cfg_resp);
+ sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
+ if (sc == NULL)
+ return -ENOMEM;
+
+ resp = (struct lio_if_cfg_resp *)sc->virtrptr;
+
+ /* Firmware cannot reconfigure the queues, so claim all queues
+ * and use only as many as required.
+ */
+ if_cfg.if_cfg64 = 0;
+ if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
+ if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
+ if_cfg.s.base_queue = 0;
+
+ if_cfg.s.gmx_port_id = lio_dev->pf_num;
+
+ lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
+ LIO_OPCODE_IF_CFG, 0,
+ if_cfg.if_cfg64, 0);
+
+ /* Setting wait time in seconds */
+ sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ goto nic_config_fail;
+ }
+
+ /* Poll until the firmware writes the response status word or the
+ * command times out.
+ */
+ while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
+ lio_process_ordered_list(lio_dev);
+ rte_delay_ms(1);
+ }
+
+ retval = resp->status;
+ if (retval) {
+ lio_dev_err(lio_dev, "iq/oq config failed\n");
+ goto nic_config_fail;
+ }
+
+ lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
+ sizeof(struct octeon_if_cfg_info) >> 3);
+
+ num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
+ num_oqueues = lio_hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ lio_dev_err(lio_dev,
+ "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
+ (unsigned long)resp->cfg_info.iqmask,
+ (unsigned long)resp->cfg_info.oqmask);
+ goto nic_config_fail;
+ }
+
+ lio_dev_dbg(lio_dev,
+ "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
+ eth_dev->data->port_id,
+ (unsigned long)resp->cfg_info.iqmask,
+ (unsigned long)resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+
+ lio_dev->linfo.num_rxpciq = num_oqueues;
+ lio_dev->linfo.num_txpciq = num_iqueues;
+
+ for (i = 0; i < num_oqueues; i++) {
+ lio_dev->linfo.rxpciq[i].rxpciq64 =
+ resp->cfg_info.linfo.rxpciq[i].rxpciq64;
+ lio_dev_dbg(lio_dev, "index %d OQ %d\n",
+ i, lio_dev->linfo.rxpciq[i].s.q_no);
+ }
+
+ for (i = 0; i < num_iqueues; i++) {
+ lio_dev->linfo.txpciq[i].txpciq64 =
+ resp->cfg_info.linfo.txpciq[i].txpciq64;
+ lio_dev_dbg(lio_dev, "index %d IQ %d\n",
+ i, lio_dev->linfo.txpciq[i].s.q_no);
+ }
+
+ lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio_dev->linfo.link.link_status64 =
+ resp->cfg_info.linfo.link.link_status64;
+
+ /* 64-bit swap required on LE machines */
+ lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
+ for (i = 0; i < ETHER_ADDR_LEN; i++)
+ mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
+ 2 + i));
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);
+
+ /* enable firmware checksum support for tunnel packets */
+ lio_enable_hw_tunnel_rx_checksum(eth_dev);
+ lio_enable_hw_tunnel_tx_checksum(eth_dev);
+
+ lio_dev->glist_lock =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
+ if (lio_dev->glist_lock == NULL)
+ return -ENOMEM;
+
+ lio_dev->glist_head =
+ rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
+ 0);
+ if (lio_dev->glist_head == NULL) {
+ rte_free(lio_dev->glist_lock);
+ lio_dev->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio_dev_link_update(eth_dev, 0);
+
+ lio_dev->port_configured = 1;
+
+ lio_free_soft_command(sc);
+
+ /* Disable iq_0 for reconf */
+ lio_dev->fn_list.disable_io_queues(lio_dev);
+
+ /* Reset ioq regs */
+ lio_dev->fn_list.setup_device_regs(lio_dev);
+
+ /* Free iq_0 used during init */
+ lio_free_instr_queue0(lio_dev);
+
+ return 0;
+
+nic_config_fail:
+ lio_dev_err(lio_dev, "Failed retval %d\n", retval);
+ lio_free_soft_command(sc);
+ lio_free_instr_queue0(lio_dev);
+
+ return -ENODEV;
+}
+
+/* Define our ethernet definitions */
+static const struct eth_dev_ops liovf_eth_dev_ops = {
+ .dev_configure = lio_dev_configure,
+ .dev_start = lio_dev_start,
+ .dev_stop = lio_dev_stop,
+ .dev_set_link_up = lio_dev_set_link_up,
+ .dev_set_link_down = lio_dev_set_link_down,
+ .dev_close = lio_dev_close,
+ .allmulticast_enable = lio_dev_allmulticast_enable,
+ .allmulticast_disable = lio_dev_allmulticast_disable,
+ .link_update = lio_dev_link_update,
+ .stats_get = lio_dev_stats_get,
+ .xstats_get = lio_dev_xstats_get,
+ .xstats_get_names = lio_dev_xstats_get_names,
+ .stats_reset = lio_dev_stats_reset,
+ .xstats_reset = lio_dev_xstats_reset,
+ .dev_infos_get = lio_dev_info_get,
+ .vlan_filter_set = lio_dev_vlan_filter_set,
+ .rx_queue_setup = lio_dev_rx_queue_setup,
+ .rx_queue_release = lio_dev_rx_queue_release,
+ .tx_queue_setup = lio_dev_tx_queue_setup,
+ .tx_queue_release = lio_dev_tx_queue_release,
+ .reta_update = lio_dev_rss_reta_update,
+ .reta_query = lio_dev_rss_reta_query,
+ .rss_hash_conf_get = lio_dev_rss_hash_conf_get,
+ .rss_hash_update = lio_dev_rss_hash_update,
+ .udp_tunnel_port_add = lio_dev_udp_tunnel_add,
+ .udp_tunnel_port_del = lio_dev_udp_tunnel_del,
+};
+
+static void
+lio_check_pf_hs_response(void *lio_dev)
+{
+ struct lio_device *dev = lio_dev;
+
+ /* check till response arrives */
+ if (dev->pfvf_hsword.coproc_tics_per_us)
+ return;
+
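+ /* Service the PF-to-VF mailbox and re-arm the alarm until the
+ * handshake word is set.
+ */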
+ cn23xx_vf_handle_mbox(dev);
+
+ rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
+}
+
+/**
+ * \brief Identify the LIO device and map the BAR address space
+ * @param lio_dev lio device
+ */
+static int
+lio_chip_specific_setup(struct lio_device *lio_dev)
+{
+ struct rte_pci_device *pdev = lio_dev->pci_dev;
+ uint32_t dev_id = pdev->id.device_id;
+ const char *s;
+ int ret = 1;
+
+ switch (dev_id) {
+ case LIO_CN23XX_VF_VID:
+ lio_dev->chip_id = LIO_CN23XX_VF_VID;
+ ret = cn23xx_vf_setup_device(lio_dev);
+ s = "CN23XX VF";
+ break;
+ default:
+ s = "?";
+ lio_dev_err(lio_dev, "Unsupported Chip\n");
+ }
+
+ if (!ret)
+ lio_dev_info(lio_dev, "DEVICE : %s\n", s);
+
+ return ret;
+}
+
+static int
+lio_first_time_init(struct lio_device *lio_dev,
+ struct rte_pci_device *pdev)
+{
+ int dpdk_queues;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* set dpdk specific pci device pointer */
+ lio_dev->pci_dev = pdev;
+
+ /* Identify the LIO type and set device ops */
+ if (lio_chip_specific_setup(lio_dev)) {
+ lio_dev_err(lio_dev, "Chip specific setup failed\n");
+ return -1;
+ }
+
+ /* Initialize soft command buffer pool */
+ if (lio_setup_sc_buffer_pool(lio_dev)) {
+ lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
+ return -1;
+ }
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from applications for this lio device.
+ */
+ lio_setup_response_list(lio_dev);
+
+ if (lio_dev->fn_list.setup_mbox(lio_dev)) {
+ lio_dev_err(lio_dev, "Mailbox setup failed\n");
+ goto error;
+ }
+
+ /* Check PF response */
+ lio_check_pf_hs_response((void *)lio_dev);
+
+ /* Do handshake and exit if incompatible PF driver */
+ if (cn23xx_pfvf_handshake(lio_dev))
+ goto error;
+
+ /* Initial reset */
+ cn23xx_vf_ask_pf_to_do_flr(lio_dev);
+ /* Wait for FLR for 100ms per SRIOV specification */
+ rte_delay_ms(100);
+
+ if (cn23xx_vf_set_io_queues_off(lio_dev)) {
+ lio_dev_err(lio_dev, "Setting io queues off failed\n");
+ goto error;
+ }
+
+ if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to configure device registers\n");
+ goto error;
+ }
+
+ if (lio_setup_instr_queue0(lio_dev)) {
+ lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
+ goto error;
+ }
+
+ dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;
+
+ lio_dev->max_tx_queues = dpdk_queues;
+ lio_dev->max_rx_queues = dpdk_queues;
+
+ /* Enable input and output queues for this device */
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ goto error;
+
+ return 0;
+
+error:
+ lio_free_sc_buffer_pool(lio_dev);
+ if (lio_dev->mbox[0])
+ lio_dev->fn_list.free_mbox(lio_dev);
+ if (lio_dev->instr_queue[0])
+ lio_free_instr_queue0(lio_dev);
+
+ return -1;
+}
+
+static int
+lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ /* lio_free_sc_buffer_pool */
+ lio_free_sc_buffer_pool(lio_dev);
+
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ return 0;
+}
+
+static int
+lio_eth_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(eth_dev->device);
+ struct lio_device *lio_dev = LIO_DEV(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
+ eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;
+
+ /* Primary does the initialization. */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ rte_eth_copy_pci_info(eth_dev, pdev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
+
+ if (pdev->mem_resource[0].addr) {
+ lio_dev->hw_addr = pdev->mem_resource[0].addr;
+ } else {
+ PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
+ return -ENODEV;
+ }
+
+ lio_dev->eth_dev = eth_dev;
+ /* set lio device print string */
+ snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
+ "%s[%02x:%02x.%x]", pdev->driver->driver.name,
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+
+ lio_dev->port_id = eth_dev->data->port_id;
+
+ if (lio_first_time_init(lio_dev, pdev)) {
+ lio_dev_err(lio_dev, "Device init failed\n");
+ return -EINVAL;
+ }
+
+ eth_dev->dev_ops = &liovf_eth_dev_ops;
+ eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
+ if (eth_dev->data->mac_addrs == NULL) {
+ lio_dev_err(lio_dev,
+ "MAC addresses memory allocation failed\n");
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ return -ENOMEM;
+ }
+
+ rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);
+ rte_wmb();
+
+ lio_dev->port_configured = 0;
+ /* Always allow unicast packets */
+ lio_dev->ifflags |= LIO_IFFLAG_UNICAST;
+
+ return 0;
+}
+
+static int
+lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_eth_dev *eth_dev;
+ int ret;
+
+ eth_dev = rte_eth_dev_pci_allocate(pci_dev,
+ sizeof(struct lio_device));
+ if (eth_dev == NULL)
+ return -ENOMEM;
+
+ ret = lio_eth_dev_init(eth_dev);
+ if (ret)
+ rte_eth_dev_pci_release(eth_dev);
+
+ return ret;
+}
+
+static int
+lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ lio_eth_dev_uninit);
+}
+
+/* Set of PCI devices this driver supports */
+static const struct rte_pci_id pci_id_liovf_map[] = {
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
+ { .vendor_id = 0, /* sentinel */ }
+};
+
+static struct rte_pci_driver rte_liovf_pmd = {
+ .id_table = pci_id_liovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = lio_eth_dev_pci_probe,
+ .remove = lio_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio");
diff --git a/drivers/net/liquidio/lio_ethdev.h b/drivers/net/liquidio/lio_ethdev.h
new file mode 100644
index 00000000..655c2011
--- /dev/null
+++ b/drivers/net/liquidio/lio_ethdev.h
@@ -0,0 +1,205 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_ETHDEV_H_
+#define _LIO_ETHDEV_H_
+
+#include <stdint.h>
+
+#include "lio_struct.h"
+
+/* timeout to check link state updates from firmware in us */
+#define LIO_LSC_TIMEOUT 100000 /* 100000us (100ms) */
+#define LIO_MAX_CMD_TIMEOUT 10000 /* 10000ms (10s) */
+
+#define LIO_DEV(_eth_dev) ((_eth_dev)->data->dev_private)
+
+/* LIO Response condition variable */
+struct lio_dev_ctrl_cmd {
+ struct rte_eth_dev *eth_dev;
+ uint64_t cond;
+};
+
+enum lio_bus_speed {
+ LIO_LINK_SPEED_UNKNOWN = 0,
+ LIO_LINK_SPEED_10000 = 10000,
+ LIO_LINK_SPEED_25000 = 25000
+};
+
+struct octeon_if_cfg_info {
+ uint64_t iqmask; /** mask for IQs enabled for the port */
+ uint64_t oqmask; /** mask for OQs enabled for the port */
+ struct octeon_link_info linfo; /** initial link information */
+ char lio_firmware_version[LIO_FW_VERSION_LENGTH];
+};
+
+/** Stats for each NIC port in RX direction. */
+struct octeon_rx_stats {
+ /* link-level stats */
+ uint64_t total_rcvd;
+ uint64_t bytes_rcvd;
+ uint64_t total_bcst;
+ uint64_t total_mcst;
+ uint64_t runts;
+ uint64_t ctl_rcvd;
+ uint64_t fifo_err; /* Accounts for over/under-run of buffers */
+ uint64_t dmac_drop;
+ uint64_t fcs_err;
+ uint64_t jabber_err;
+ uint64_t l2_err;
+ uint64_t frame_err;
+
+ /* firmware stats */
+ uint64_t fw_total_rcvd;
+ uint64_t fw_total_fwd;
+ uint64_t fw_total_fwd_bytes;
+ uint64_t fw_err_pko;
+ uint64_t fw_err_link;
+ uint64_t fw_err_drop;
+ uint64_t fw_rx_vxlan;
+ uint64_t fw_rx_vxlan_err;
+
+ /* LRO */
+ uint64_t fw_lro_pkts; /* Number of packets that are LROed */
+ uint64_t fw_lro_octs; /* Number of octets that are LROed */
+ uint64_t fw_total_lro; /* Number of LRO packets formed */
+ uint64_t fw_lro_aborts; /* Number of times LRO of a packet was aborted */
+ uint64_t fw_lro_aborts_port;
+ uint64_t fw_lro_aborts_seq;
+ uint64_t fw_lro_aborts_tsval;
+ uint64_t fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ uint64_t fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction. */
+struct octeon_tx_stats {
+ /* link-level stats */
+ uint64_t total_pkts_sent;
+ uint64_t total_bytes_sent;
+ uint64_t mcast_pkts_sent;
+ uint64_t bcast_pkts_sent;
+ uint64_t ctl_sent;
+ uint64_t one_collision_sent; /* Packets sent after one collision */
+ /* Packets sent after multiple collision */
+ uint64_t multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ uint64_t max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ uint64_t max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ uint64_t fifo_err;
+ uint64_t runts;
+ uint64_t total_collisions; /* Total number of collisions detected */
+
+ /* firmware stats */
+ uint64_t fw_total_sent;
+ uint64_t fw_total_fwd;
+ uint64_t fw_total_fwd_bytes;
+ uint64_t fw_err_pko;
+ uint64_t fw_err_link;
+ uint64_t fw_err_drop;
+ uint64_t fw_err_tso;
+ uint64_t fw_tso; /* number of tso requests */
+ uint64_t fw_tso_fwd; /* number of packets segmented in tso */
+ uint64_t fw_tx_vxlan;
+};
+
+struct octeon_link_stats {
+ struct octeon_rx_stats fromwire;
+ struct octeon_tx_stats fromhost;
+};
+
+union lio_if_cfg {
+ uint64_t if_cfg64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t base_queue : 16;
+ uint64_t num_iqueues : 16;
+ uint64_t num_oqueues : 16;
+ uint64_t gmx_port_id : 8;
+ uint64_t vf_id : 8;
+#else
+ uint64_t vf_id : 8;
+ uint64_t gmx_port_id : 8;
+ uint64_t num_oqueues : 16;
+ uint64_t num_iqueues : 16;
+ uint64_t base_queue : 16;
+#endif
+ } s;
+};
+
+struct lio_if_cfg_resp {
+ uint64_t rh;
+ struct octeon_if_cfg_info cfg_info;
+ uint64_t status;
+};
+
+struct lio_link_stats_resp {
+ uint64_t rh;
+ struct octeon_link_stats link_stats;
+ uint64_t status;
+};
+
+struct lio_link_status_resp {
+ uint64_t rh;
+ struct octeon_link_info link_info;
+ uint64_t status;
+};
+
+struct lio_rss_set {
+ struct param {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t flags : 16;
+ uint64_t hashinfo : 32;
+ uint64_t itablesize : 16;
+ uint64_t hashkeysize : 16;
+ uint64_t reserved : 48;
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t itablesize : 16;
+ uint64_t hashinfo : 32;
+ uint64_t flags : 16;
+ uint64_t reserved : 48;
+ uint64_t hashkeysize : 16;
+#endif
+ } param;
+
+ uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
+ uint8_t key[LIO_RSS_MAX_KEY_SZ];
+};
+
+void lio_dev_rx_queue_release(void *rxq);
+
+void lio_dev_tx_queue_release(void *txq);
+
+#endif /* _LIO_ETHDEV_H_ */
diff --git a/drivers/net/liquidio/lio_logs.h b/drivers/net/liquidio/lio_logs.h
new file mode 100644
index 00000000..a4c9ca4d
--- /dev/null
+++ b/drivers/net/liquidio/lio_logs.h
@@ -0,0 +1,91 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_LOGS_H_
+#define _LIO_LOGS_H_
+
+#define lio_dev_printf(lio_dev, level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s" fmt, (lio_dev)->dev_string, ##args)
+
+#define lio_dev_info(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, INFO, "INFO: " fmt, ##args)
+
+#define lio_dev_err(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, ERR, "ERROR: %s() " fmt, __func__, ##args)
+
+#define PMD_INIT_LOG(level, fmt, args...) RTE_LOG(level, PMD, fmt, ## args)
+
+/* Enable these through config options */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, "%s() >>\n", __func__)
+#else /* !RTE_LIBRTE_LIO_DEBUG_INIT */
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_INIT */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_DRIVER
+#define lio_dev_dbg(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, DEBUG, "DEBUG: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_DRIVER */
+#define lio_dev_dbg(lio_dev, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_DRIVER */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_RX
+#define PMD_RX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "RX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_RX */
+#define PMD_RX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_RX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_TX
+#define PMD_TX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "TX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_TX */
+#define PMD_TX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_TX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_MBOX
+#define PMD_MBOX_LOG(lio_dev, level, fmt, args...) \
+ lio_dev_printf(lio_dev, level, "MBOX: %s() " fmt, __func__, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_MBOX */
+#define PMD_MBOX_LOG(lio_dev, level, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_MBOX */
+
+#ifdef RTE_LIBRTE_LIO_DEBUG_REGS
+#define PMD_REGS_LOG(lio_dev, fmt, args...) \
+ lio_dev_printf(lio_dev, DEBUG, "REGS: " fmt, ##args)
+#else /* !RTE_LIBRTE_LIO_DEBUG_REGS */
+#define PMD_REGS_LOG(lio_dev, fmt, args...) do { } while (0)
+#endif /* RTE_LIBRTE_LIO_DEBUG_REGS */
+
+#endif /* _LIO_LOGS_H_ */
diff --git a/drivers/net/liquidio/lio_rxtx.c b/drivers/net/liquidio/lio_rxtx.c
new file mode 100644
index 00000000..9533015c
--- /dev/null
+++ b/drivers/net/liquidio/lio_rxtx.c
@@ -0,0 +1,1885 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_malloc.h>
+
+#include "lio_logs.h"
+#include "lio_struct.h"
+#include "lio_ethdev.h"
+#include "lio_rxtx.h"
+
+#define LIO_MAX_SG 12
+/* Flush iq if available tx_desc fall below LIO_FLUSH_WM */
+#define LIO_FLUSH_WM(_iq) ((_iq)->max_count / 2)
+#define LIO_PKT_IN_DONE_CNT_MASK 0x00000000FFFFFFFFULL
+
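+/* Walks up in buffer_size steps until LIO_MAX_RX_PKTLEN is covered. The
+ * count computed here stays local; packets larger than one buffer are
+ * reassembled from multiple descriptors in lio_droq_fast_process_packet().
+ */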
+static void
+lio_droq_compute_max_packet_bufs(struct lio_droq *droq)
+{
+ uint32_t count = 0;
+
+ do {
+ count += droq->buffer_size;
+ } while (count < LIO_MAX_RX_PKTLEN);
+}
+
+static void
+lio_droq_reset_indices(struct lio_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ rte_atomic64_set(&droq->pkts_pending, 0);
+}
+
+static void
+lio_droq_destroy_ring_buffers(struct lio_droq *droq)
+{
+ uint32_t i;
+
+ for (i = 0; i < droq->max_count; i++) {
+ if (droq->recv_buf_list[i].buffer) {
+ rte_pktmbuf_free((struct rte_mbuf *)
+ droq->recv_buf_list[i].buffer);
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+ }
+
+ lio_droq_reset_indices(droq);
+}
+
+static void *
+lio_recv_buffer_alloc(struct lio_device *lio_dev, int q_no)
+{
+ struct lio_droq *droq = lio_dev->droq[q_no];
+ struct rte_mempool *mpool = droq->mpool;
+ struct rte_mbuf *m;
+
+ m = rte_pktmbuf_alloc(mpool);
+ if (m == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate\n");
+ return NULL;
+ }
+
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ m->nb_segs = 1;
+ m->pool = mpool;
+
+ return m;
+}
+
+static int
+lio_droq_setup_ring_buffers(struct lio_device *lio_dev,
+ struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring = droq->desc_ring;
+ uint32_t i;
+ void *buf;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ if (buf == NULL) {
+ lio_dev_err(lio_dev, "buffer alloc failed\n");
+ droq->stats.rx_alloc_failure++;
+ lio_droq_destroy_ring_buffers(droq);
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->info_list[i].length = 0;
+
+ /* map ring buffers into memory */
+ desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[i].buffer);
+ }
+
+ lio_droq_reset_indices(droq);
+
+ lio_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+static void
+lio_dma_zone_free(struct lio_device *lio_dev, const struct rte_memzone *mz)
+{
+ const struct rte_memzone *mz_tmp;
+ int ret = 0;
+
+ if (mz == NULL) {
+ lio_dev_err(lio_dev, "Memzone NULL\n");
+ return;
+ }
+
+ mz_tmp = rte_memzone_lookup(mz->name);
+ if (mz_tmp == NULL) {
+ lio_dev_err(lio_dev, "Memzone %s Not Found\n", mz->name);
+ return;
+ }
+
+ ret = rte_memzone_free(mz);
+ if (ret)
+ lio_dev_err(lio_dev, "Memzone free Failed ret %d\n", ret);
+}
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param q_no - droq no.
+ */
+static void
+lio_delete_droq(struct lio_device *lio_dev, uint32_t q_no)
+{
+ struct lio_droq *droq = lio_dev->droq[q_no];
+
+ lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+
+ lio_droq_destroy_ring_buffers(droq);
+ rte_free(droq->recv_buf_list);
+ droq->recv_buf_list = NULL;
+ lio_dma_zone_free(lio_dev, droq->info_mz);
+ lio_dma_zone_free(lio_dev, droq->desc_ring_mz);
+
+ memset(droq, 0, LIO_DROQ_SIZE);
+}
+
+static void *
+lio_alloc_info_buffer(struct lio_device *lio_dev,
+ struct lio_droq *droq, unsigned int socket_id)
+{
+ droq->info_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "info_list", droq->q_no,
+ (droq->max_count *
+ LIO_DROQ_INFO_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (droq->info_mz == NULL)
+ return NULL;
+
+ droq->info_list_dma = droq->info_mz->phys_addr;
+ droq->info_alloc_size = droq->info_mz->len;
+ droq->info_base_addr = (size_t)droq->info_mz->addr;
+
+ return droq->info_mz->addr;
+}
+
+/**
+ * Allocates space for the descriptor ring for the droq and
+ * sets the base addr, num desc etc in Octeon registers.
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param q_no - droq no.
+ * @param num_descs - number of descriptors in the ring
+ * @param desc_size - size of the receive buffer for each descriptor
+ * @param mpool - mempool from which receive buffers are allocated
+ * @param socket_id - NUMA socket on which to allocate ring memory
+ * @return Success: 0 Failure: -1
+ */
+static int
+lio_init_droq(struct lio_device *lio_dev, uint32_t q_no,
+ uint32_t num_descs, uint32_t desc_size,
+ struct rte_mempool *mpool, unsigned int socket_id)
+{
+ uint32_t c_refill_threshold;
+ uint32_t desc_ring_size;
+ struct lio_droq *droq;
+
+ lio_dev_dbg(lio_dev, "OQ[%d]\n", q_no);
+
+ droq = lio_dev->droq[q_no];
+ droq->lio_dev = lio_dev;
+ droq->q_no = q_no;
+ droq->mpool = mpool;
+
+ c_refill_threshold = LIO_OQ_REFILL_THRESHOLD_CFG(lio_dev);
+
+ droq->max_count = num_descs;
+ droq->buffer_size = desc_size;
+
+ desc_ring_size = droq->max_count * LIO_DROQ_DESC_SIZE;
+ droq->desc_ring_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "droq", q_no,
+ desc_ring_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+
+ if (droq->desc_ring_mz == NULL) {
+ lio_dev_err(lio_dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return -1;
+ }
+
+ droq->desc_ring_dma = droq->desc_ring_mz->phys_addr;
+ droq->desc_ring = (struct lio_droq_desc *)droq->desc_ring_mz->addr;
+
+ lio_dev_dbg(lio_dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
+ lio_dev_dbg(lio_dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->max_count);
+
+ droq->info_list = lio_alloc_info_buffer(lio_dev, droq, socket_id);
+ if (droq->info_list == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate memory for info list.\n");
+ goto init_droq_fail;
+ }
+
+ droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
+ (droq->max_count *
+ LIO_DROQ_RECVBUF_SIZE),
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (droq->recv_buf_list == NULL) {
+ lio_dev_err(lio_dev,
+ "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (lio_droq_setup_ring_buffers(lio_dev, droq))
+ goto init_droq_fail;
+
+ droq->refill_threshold = c_refill_threshold;
+
+ rte_spinlock_init(&droq->lock);
+
+ lio_dev->fn_list.setup_oq_regs(lio_dev, q_no);
+
+ lio_dev->io_qmask.oq |= (1ULL << q_no);
+
+ return 0;
+
+init_droq_fail:
+ lio_delete_droq(lio_dev, q_no);
+
+ return -1;
+}
+
+int
+lio_setup_droq(struct lio_device *lio_dev, int oq_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool, unsigned int socket_id)
+{
+ struct lio_droq *droq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (lio_dev->droq[oq_no]) {
+ lio_dev_dbg(lio_dev, "Droq %d in use\n", oq_no);
+ return 0;
+ }
+
+ /* Allocate the DS for the new droq. */
+ droq = rte_zmalloc_socket("ethdev RX queue", sizeof(*droq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (droq == NULL)
+ return -ENOMEM;
+
+ lio_dev->droq[oq_no] = droq;
+
+ /* Initialize the Droq */
+ if (lio_init_droq(lio_dev, oq_no, num_descs, desc_size, mpool,
+ socket_id)) {
+ lio_dev_err(lio_dev, "Droq[%u] Initialization Failed\n", oq_no);
+ rte_free(lio_dev->droq[oq_no]);
+ lio_dev->droq[oq_no] = NULL;
+ return -ENOMEM;
+ }
+
+ lio_dev->num_oqs++;
+
+ lio_dev_dbg(lio_dev, "Total number of OQ: %d\n", lio_dev->num_oqs);
+
+ /* Send credit for octeon output queues. credits are always
+ * sent after the output queue is enabled.
+ */
+ rte_write32(lio_dev->droq[oq_no]->max_count,
+ lio_dev->droq[oq_no]->pkts_credit_reg);
+ rte_wmb();
+
+ return 0;
+}
+
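+/* Number of ring buffers consumed by a packet of total_len bytes; e.g. a
+ * 3000 byte packet with 2048 byte buffers occupies two descriptors.
+ */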
+static inline uint32_t
+lio_droq_get_bufcount(uint32_t buf_size, uint32_t total_len)
+{
+ uint32_t buf_cnt = 0;
+
+ while (total_len > (buf_size * buf_cnt))
+ buf_cnt++;
+
+ return buf_cnt;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline uint32_t
+lio_droq_refill_pullup_descs(struct lio_droq *droq,
+ struct lio_droq_desc *desc_ring)
+{
+ uint32_t refill_index = droq->refill_idx;
+ uint32_t desc_refilled = 0;
+
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ droq->refill_idx = lio_incr_index(
+ droq->refill_idx, 1,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].buffer);
+ }
+ refill_index = lio_incr_index(refill_index, 1,
+ droq->max_count);
+ } /* while */
+
+ return desc_refilled;
+}
+
+/* lio_droq_refill
+ *
+ * @param lio_dev - pointer to the lio device structure
+ * @param droq - droq in which descriptors require new buffers.
+ *
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ *
+ * Returns:
+ * No of descriptors refilled.
+ *
+ * Locks:
+ * This routine is called with droq->lock held.
+ */
+static uint32_t
+lio_droq_refill(struct lio_device *lio_dev, struct lio_droq *droq)
+{
+ struct lio_droq_desc *desc_ring;
+ uint32_t desc_refilled = 0;
+ void *buf = NULL;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->max_count)) {
+ /* If a valid buffer exists (happens if there is no dispatch),
+ * reuse the buffer, else allocate.
+ */
+ if (droq->recv_buf_list[droq->refill_idx].buffer == NULL) {
+ buf = lio_recv_buffer_alloc(lio_dev, droq->q_no);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (buf == NULL) {
+ droq->stats.rx_alloc_failure++;
+ break;
+ }
+
+ droq->recv_buf_list[droq->refill_idx].buffer = buf;
+ }
+
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[droq->refill_idx].buffer);
+ /* Reset any previous values in the length field. */
+ droq->info_list[droq->refill_idx].length = 0;
+
+ droq->refill_idx = lio_incr_index(droq->refill_idx, 1,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled += lio_droq_refill_pullup_descs(droq, desc_ring);
+
+ /* If droq->refill_count is still non-zero here, pass two did not change
+ * it: the pullup only moves buffers to close the gap in the ring, so the
+ * same number of buffers still needs to be refilled.
+ */
+ return desc_refilled;
+}
+
+static int
+lio_droq_fast_process_packet(struct lio_device *lio_dev,
+ struct lio_droq *droq,
+ struct rte_mbuf **rx_pkts)
+{
+ struct rte_mbuf *nicbuf = NULL;
+ struct lio_droq_info *info;
+ uint32_t total_len = 0;
+ int data_total_len = 0;
+ uint32_t pkt_len = 0;
+ union octeon_rh *rh;
+ int data_pkts = 0;
+
+ info = &droq->info_list[droq->read_idx];
+ lio_swap_8B_data((uint64_t *)info, 2);
+
+ if (!info->length)
+ return -1;
+
+ /* Len of resp hdr is included in the received data len. */
+ info->length -= OCTEON_RH_SIZE;
+ rh = &info->rh;
+
+ total_len += (uint32_t)info->length;
+
+ if (lio_opcode_slow_path(rh)) {
+ uint32_t buf_cnt;
+
+ buf_cnt = lio_droq_get_bufcount(droq->buffer_size,
+ (uint32_t)info->length);
+ droq->read_idx = lio_incr_index(droq->read_idx, buf_cnt,
+ droq->max_count);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ if (rh->r_dh.has_hash)
+ pkt_len = (uint32_t)(info->length - 8);
+ else
+ pkt_len = (uint32_t)info->length;
+
+ nicbuf = droq->recv_buf_list[droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer = NULL;
+ droq->read_idx = lio_incr_index(
+ droq->read_idx, 1,
+ droq->max_count);
+ droq->refill_count++;
+
+ if (likely(nicbuf != NULL)) {
+ nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ nicbuf->nb_segs = 1;
+ nicbuf->next = NULL;
+ /* We don't have a way to pass flags yet */
+ nicbuf->ol_flags = 0;
+ if (rh->r_dh.has_hash) {
+ uint64_t *hash_ptr;
+
+ nicbuf->ol_flags |= PKT_RX_RSS_HASH;
+ hash_ptr = rte_pktmbuf_mtod(nicbuf,
+ uint64_t *);
+ lio_swap_8B_data(hash_ptr, 1);
+ nicbuf->hash.rss = (uint32_t)*hash_ptr;
+ nicbuf->data_off += 8;
+ }
+
+ nicbuf->pkt_len = pkt_len;
+ nicbuf->data_len = pkt_len;
+ nicbuf->port = lio_dev->port_id;
+ /* Store the mbuf */
+ rx_pkts[data_pkts++] = nicbuf;
+ data_total_len += pkt_len;
+ }
+
+ /* Prefetch buffer pointers when on a cache line
+ * boundary
+ */
+ if ((droq->read_idx & 3) == 0) {
+ rte_prefetch0(
+ &droq->recv_buf_list[droq->read_idx]);
+ rte_prefetch0(
+ &droq->info_list[droq->read_idx]);
+ }
+ } else {
+ struct rte_mbuf *first_buf = NULL;
+ struct rte_mbuf *last_buf = NULL;
+
+ while (pkt_len < info->length) {
+ int cpy_len = 0;
+
+ cpy_len = ((pkt_len + droq->buffer_size) >
+ info->length)
+ ? ((uint32_t)info->length -
+ pkt_len)
+ : droq->buffer_size;
+
+ nicbuf =
+ droq->recv_buf_list[droq->read_idx].buffer;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+
+ if (likely(nicbuf != NULL)) {
+ /* Note the first seg */
+ if (!pkt_len)
+ first_buf = nicbuf;
+
+ nicbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ nicbuf->nb_segs = 1;
+ nicbuf->next = NULL;
+ nicbuf->port = lio_dev->port_id;
+ /* We don't have a way to pass
+ * flags yet
+ */
+ nicbuf->ol_flags = 0;
+ if ((!pkt_len) && (rh->r_dh.has_hash)) {
+ uint64_t *hash_ptr;
+
+ nicbuf->ol_flags |=
+ PKT_RX_RSS_HASH;
+ hash_ptr = rte_pktmbuf_mtod(
+ nicbuf, uint64_t *);
+ lio_swap_8B_data(hash_ptr, 1);
+ nicbuf->hash.rss =
+ (uint32_t)*hash_ptr;
+ nicbuf->data_off += 8;
+ nicbuf->pkt_len = cpy_len - 8;
+ nicbuf->data_len = cpy_len - 8;
+ } else {
+ nicbuf->pkt_len = cpy_len;
+ nicbuf->data_len = cpy_len;
+ }
+
+ if (pkt_len)
+ first_buf->nb_segs++;
+
+ if (last_buf)
+ last_buf->next = nicbuf;
+
+ last_buf = nicbuf;
+ } else {
+ PMD_RX_LOG(lio_dev, ERR, "no buf\n");
+ }
+
+ pkt_len += cpy_len;
+ droq->read_idx = lio_incr_index(
+ droq->read_idx,
+ 1, droq->max_count);
+ droq->refill_count++;
+
+ /* Prefetch buffer pointers when on a
+ * cache line boundary
+ */
+ if ((droq->read_idx & 3) == 0) {
+ rte_prefetch0(&droq->recv_buf_list
+ [droq->read_idx]);
+
+ rte_prefetch0(
+ &droq->info_list[droq->read_idx]);
+ }
+ }
+ rx_pkts[data_pkts++] = first_buf;
+ if (rh->r_dh.has_hash)
+ data_total_len += (pkt_len - 8);
+ else
+ data_total_len += pkt_len;
+ }
+
+ /* Inform upper layer about packet checksum verification */
+ struct rte_mbuf *m = rx_pkts[data_pkts - 1];
+
+ if (rh->r_dh.csum_verified & LIO_IP_CSUM_VERIFIED)
+ m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+
+ if (rh->r_dh.csum_verified & LIO_L4_CSUM_VERIFIED)
+ m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = lio_droq_refill(lio_dev, droq);
+
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory is
+ * accurate.
+ */
+ rte_wmb();
+ rte_write32(desc_refilled, droq->pkts_credit_reg);
+ /* make sure mmio write completes */
+ rte_wmb();
+ }
+
+ info->length = 0;
+ info->rh.rh64 = 0;
+
+ droq->stats.pkts_received++;
+ droq->stats.rx_pkts_received += data_pkts;
+ droq->stats.rx_bytes_received += data_total_len;
+ droq->stats.bytes_received += total_len;
+
+ return data_pkts;
+}
+
+static uint32_t
+lio_droq_fast_process_packets(struct lio_device *lio_dev,
+ struct lio_droq *droq,
+ struct rte_mbuf **rx_pkts,
+ uint32_t pkts_to_process)
+{
+ int ret, data_pkts = 0;
+ uint32_t pkt;
+
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ ret = lio_droq_fast_process_packet(lio_dev, droq,
+ &rx_pkts[data_pkts]);
+ if (ret < 0) {
+ lio_dev_err(lio_dev, "Port[%d] DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ lio_dev->port_id, droq->q_no,
+ droq->read_idx, pkts_to_process);
+ break;
+ }
+ data_pkts += ret;
+ }
+
+ rte_atomic64_sub(&droq->pkts_pending, pkt);
+
+ return data_pkts;
+}
+
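+/* pkts_sent_reg is a free-running hardware counter; the unsigned 32-bit
+ * subtraction below yields the number of packets that arrived since the
+ * last read and stays correct across counter wrap-around.
+ */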
+static inline uint32_t
+lio_droq_check_hw_for_pkts(struct lio_droq *droq)
+{
+ uint32_t last_count;
+ uint32_t pkt_count;
+
+ pkt_count = rte_read32(droq->pkts_sent_reg);
+
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+ if (last_count)
+ rte_atomic64_add(&droq->pkts_pending, last_count);
+
+ return last_count;
+}
+
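+/* Receive burst routine. It is expected to be installed as the PMD's
+ * eth_dev->rx_pkt_burst callback by the ethdev init code in lio_ethdev.c;
+ * the registration itself is outside this file.
+ */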
+uint16_t
+lio_dev_recv_pkts(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t budget)
+{
+ struct lio_droq *droq = rx_queue;
+ struct lio_device *lio_dev = droq->lio_dev;
+ uint32_t pkts_processed = 0;
+ uint32_t pkt_count = 0;
+
+ lio_droq_check_hw_for_pkts(droq);
+
+ pkt_count = rte_atomic64_read(&droq->pkts_pending);
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ /* Grab the lock */
+ rte_spinlock_lock(&droq->lock);
+ pkts_processed = lio_droq_fast_process_packets(lio_dev,
+ droq, rx_pkts,
+ pkt_count);
+
+ if (droq->pkt_count) {
+ rte_write32(droq->pkt_count, droq->pkts_sent_reg);
+ droq->pkt_count = 0;
+ }
+
+ /* Release the spin lock */
+ rte_spinlock_unlock(&droq->lock);
+
+ return pkts_processed;
+}
+
+void
+lio_delete_droq_queue(struct lio_device *lio_dev,
+ int oq_no)
+{
+ lio_delete_droq(lio_dev, oq_no);
+ lio_dev->num_oqs--;
+ rte_free(lio_dev->droq[oq_no]);
+ lio_dev->droq[oq_no] = NULL;
+}
+
+/**
+ * lio_init_instr_queue()
+ * @param lio_dev - pointer to the lio device structure.
+ * @param txpciq - queue to be initialized.
+ * @param num_descs - number of descriptors in the queue.
+ * @param socket_id - NUMA socket for the queue allocations.
+ *
+ * Called at driver init time for each input queue. The instruction size
+ * (32B/64B) is taken from the device configuration.
+ *
+ * @return Success: 0 Failure: -1
+ */
+static int
+lio_init_instr_queue(struct lio_device *lio_dev,
+ union octeon_txpciq txpciq,
+ uint32_t num_descs, unsigned int socket_id)
+{
+ uint32_t iq_no = (uint32_t)txpciq.s.q_no;
+ struct lio_instr_queue *iq;
+ uint32_t instr_type;
+ uint32_t q_size;
+
+ instr_type = LIO_IQ_INSTR_TYPE(lio_dev);
+
+ q_size = instr_type * num_descs;
+ iq = lio_dev->instr_queue[iq_no];
+ iq->iq_mz = rte_eth_dma_zone_reserve(lio_dev->eth_dev,
+ "instr_queue", iq_no, q_size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->iq_mz == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return -1;
+ }
+
+ iq->base_addr_dma = iq->iq_mz->phys_addr;
+ iq->base_addr = (uint8_t *)iq->iq_mz->addr;
+
+ iq->max_count = num_descs;
+
+ /* Initialize a list to hold requests that have been posted to Octeon
+ * but have yet to be fetched by Octeon
+ */
+ iq->request_list = rte_zmalloc_socket("request_list",
+ sizeof(*iq->request_list) *
+ num_descs,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (iq->request_list == NULL) {
+ lio_dev_err(lio_dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ lio_dma_zone_free(lio_dev, iq->iq_mz);
+ return -1;
+ }
+
+ lio_dev_dbg(lio_dev, "IQ[%d]: base: %p basedma: %lx count: %d\n",
+ iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
+ iq->max_count);
+
+ iq->lio_dev = lio_dev;
+ iq->txpciq.txpciq64 = txpciq.txpciq64;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->lio_read_index = 0;
+ iq->flush_index = 0;
+
+ rte_atomic64_set(&iq->instr_pending, 0);
+
+ /* Initialize the spinlock for this instruction queue */
+ rte_spinlock_init(&iq->lock);
+ rte_spinlock_init(&iq->post_lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ lio_dev->io_qmask.iq |= (1ULL << iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ lio_dev->io_qmask.iq64B |= ((instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (instr_type == 64);
+
+ lio_dev->fn_list.setup_iq_regs(lio_dev, iq_no);
+
+ return 0;
+}
+
+int
+lio_setup_instr_queue0(struct lio_device *lio_dev)
+{
+ union octeon_txpciq txpciq;
+ uint32_t num_descs = 0;
+ uint32_t iq_no = 0;
+
+ num_descs = LIO_NUM_DEF_TX_DESCS_CFG(lio_dev);
+
+ lio_dev->num_iqs = 0;
+
+ lio_dev->instr_queue[0] = rte_zmalloc(NULL,
+ sizeof(struct lio_instr_queue), 0);
+ if (lio_dev->instr_queue[0] == NULL)
+ return -ENOMEM;
+
+ lio_dev->instr_queue[0]->q_index = 0;
+ lio_dev->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ txpciq.txpciq64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = lio_dev->pfvf_hsword.pkind;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, SOCKET_ID_ANY)) {
+ rte_free(lio_dev->instr_queue[0]);
+ lio_dev->instr_queue[0] = NULL;
+ return -1;
+ }
+
+ lio_dev->num_iqs++;
+
+ return 0;
+}
+
+/**
+ * lio_delete_instr_queue()
+ * @param lio_dev - pointer to the lio device structure.
+ * @param iq_no - queue to be deleted.
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ */
+static void
+lio_delete_instr_queue(struct lio_device *lio_dev, uint32_t iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+
+ rte_free(iq->request_list);
+ iq->request_list = NULL;
+ lio_dma_zone_free(lio_dev, iq->iq_mz);
+}
+
+void
+lio_free_instr_queue0(struct lio_device *lio_dev)
+{
+ lio_delete_instr_queue(lio_dev, 0);
+ rte_free(lio_dev->instr_queue[0]);
+ lio_dev->instr_queue[0] = NULL;
+ lio_dev->num_iqs--;
+}
+
+/* Return 0 on success, -1 on failure */
+int
+lio_setup_iq(struct lio_device *lio_dev, int q_index,
+ union octeon_txpciq txpciq, uint32_t num_descs, void *app_ctx,
+ unsigned int socket_id)
+{
+ uint32_t iq_no = (uint32_t)txpciq.s.q_no;
+
+ if (lio_dev->instr_queue[iq_no]) {
+ lio_dev_dbg(lio_dev, "IQ is in use. Cannot create the IQ: %d again\n",
+ iq_no);
+ lio_dev->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64;
+ lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
+ return 0;
+ }
+
+ lio_dev->instr_queue[iq_no] = rte_zmalloc_socket("ethdev TX queue",
+ sizeof(struct lio_instr_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (lio_dev->instr_queue[iq_no] == NULL)
+ return -1;
+
+ lio_dev->instr_queue[iq_no]->q_index = q_index;
+ lio_dev->instr_queue[iq_no]->app_ctx = app_ctx;
+
+ if (lio_init_instr_queue(lio_dev, txpciq, num_descs, socket_id))
+ goto release_lio_iq;
+
+ lio_dev->num_iqs++;
+ if (lio_dev->fn_list.enable_io_queues(lio_dev))
+ goto delete_lio_iq;
+
+ return 0;
+
+delete_lio_iq:
+ lio_delete_instr_queue(lio_dev, iq_no);
+ lio_dev->num_iqs--;
+release_lio_iq:
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+
+ return -1;
+}
+
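+/* Wait for all posted instructions to be fetched by the device: flush each
+ * active IQ and poll its pending count, retrying for up to ~1 second
+ * (1000 iterations of 1 ms). Returns the number of instructions still
+ * pending on timeout.
+ */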
+int
+lio_wait_for_instr_fetch(struct lio_device *lio_dev)
+{
+ int pending, instr_cnt;
+ int i, retry = 1000;
+
+ do {
+ instr_cnt = 0;
+
+ for (i = 0; i < LIO_MAX_INSTR_QUEUES(lio_dev); i++) {
+ if (!(lio_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+
+ if (lio_dev->instr_queue[i] == NULL)
+ break;
+
+ pending = rte_atomic64_read(
+ &lio_dev->instr_queue[i]->instr_pending);
+ if (pending)
+ lio_flush_iq(lio_dev, lio_dev->instr_queue[i]);
+
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ rte_delay_ms(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
+static inline void
+lio_ring_doorbell(struct lio_device *lio_dev,
+ struct lio_instr_queue *iq)
+{
+ if (rte_atomic64_read(&lio_dev->status) == LIO_DEV_RUNNING) {
+ rte_write32(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ rte_wmb();
+ iq->fill_cnt = 0;
+ }
+}
+
+static inline void
+copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd)
+{
+ uint8_t *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ rte_memcpy(iqptr, cmd, cmdsize);
+}
+
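+/* Ring occupancy policy: with max_count - 1 commands already pending the
+ * post is rejected (LIO_IQ_SEND_FAILED); at max_count - 2 the command is
+ * still accepted but LIO_IQ_SEND_STOP asks the caller to stop queueing.
+ */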
+static inline struct lio_iq_post_status
+post_command2(struct lio_instr_queue *iq, uint8_t *cmd)
+{
+ struct lio_iq_post_status st;
+
+ st.status = LIO_IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+ * position if queue gets full before Octeon could fetch any instr.
+ */
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->max_count - 1)) {
+ st.status = LIO_IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (rte_atomic64_read(&iq->instr_pending) >=
+ (int32_t)(iq->max_count - 2))
+ st.status = LIO_IQ_SEND_STOP;
+
+ copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ iq->host_write_index = lio_incr_index(iq->host_write_index, 1,
+ iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ rte_wmb();
+
+ rte_atomic64_inc(&iq->instr_pending);
+
+ return st;
+}
+
+static inline void
+lio_add_to_request_list(struct lio_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
+static inline void
+lio_free_netsgbuf(void *buf)
+{
+ struct lio_buf_free_info *finfo = buf;
+ struct lio_device *lio_dev = finfo->lio_dev;
+ struct rte_mbuf *m = finfo->mbuf;
+ struct lio_gather *g = finfo->g;
+ uint8_t iq = finfo->iq_no;
+
+ /* This will take care of multiple segments also */
+ rte_pktmbuf_free(m);
+
+ rte_spinlock_lock(&lio_dev->glist_lock[iq]);
+ STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq], &g->list, entries);
+ rte_spinlock_unlock(&lio_dev->glist_lock[iq]);
+ rte_free(finfo);
+}
+
+/* Can only run in process context */
+static int
+lio_process_iq_request_list(struct lio_device *lio_dev,
+ struct lio_instr_queue *iq)
+{
+ struct octeon_instr_irh *irh = NULL;
+ uint32_t old = iq->flush_index;
+ struct lio_soft_command *sc;
+ uint32_t inst_count = 0;
+ int reqtype;
+ void *buf;
+
+ while (old != iq->lio_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == LIO_REQTYPE_NONE)
+ goto skip_this;
+
+ switch (reqtype) {
+ case LIO_REQTYPE_NORESP_NET:
+ rte_pktmbuf_free((struct rte_mbuf *)buf);
+ break;
+ case LIO_REQTYPE_NORESP_NET_SG:
+ lio_free_netsgbuf(buf);
+ break;
+ case LIO_REQTYPE_SOFT_COMMAND:
+ sc = buf;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ rte_spinlock_lock(&lio_dev->response_list.lock);
+ rte_atomic64_inc(
+ &lio_dev->response_list.pending_req_count);
+ STAILQ_INSERT_TAIL(
+ &lio_dev->response_list.head,
+ &sc->node, entries);
+ rte_spinlock_unlock(
+ &lio_dev->response_list.lock);
+ } else {
+ if (sc->callback) {
+ /* This callback must not sleep */
+ sc->callback(LIO_REQUEST_DONE,
+ sc->callback_arg);
+ }
+ }
+ break;
+ default:
+ lio_dev_err(lio_dev,
+ "Unknown reqtype: %d buf: %p at idx %d\n",
+ reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+skip_this:
+ inst_count++;
+ old = lio_incr_index(old, 1, iq->max_count);
+ }
+
+ iq->flush_index = old;
+
+ return inst_count;
+}
+
+static void
+lio_update_read_index(struct lio_instr_queue *iq)
+{
+ uint32_t pkt_in_done = rte_read32(iq->inst_cnt_reg);
+ uint32_t last_done;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Add last_done and modulo with the IQ size to get new index */
+ iq->lio_read_index = (iq->lio_read_index +
+ (uint32_t)(last_done & LIO_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+}
+
+int
+lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq)
+{
+ uint32_t tot_inst_processed = 0;
+ uint32_t inst_processed = 0;
+ int tx_done = 1;
+
+ if (rte_atomic64_test_and_set(&iq->iq_flush_running) == 0)
+ return tx_done;
+
+ rte_spinlock_lock(&iq->lock);
+
+ lio_update_read_index(iq);
+
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->lio_read_index)
+ break;
+
+ inst_processed = lio_process_iq_request_list(lio_dev, iq);
+
+ if (inst_processed) {
+ rte_atomic64_sub(&iq->instr_pending, inst_processed);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
+
+ } while (1);
+
+ rte_spinlock_unlock(&iq->lock);
+
+ rte_atomic64_clear(&iq->iq_flush_running);
+
+ return tx_done;
+}
+
+static int
+lio_send_command(struct lio_device *lio_dev, uint32_t iq_no, void *cmd,
+ void *buf, uint32_t datasize, uint32_t reqtype)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ struct lio_iq_post_status st;
+
+ rte_spinlock_lock(&iq->post_lock);
+
+ st = post_command2(iq, cmd);
+
+ if (st.status != LIO_IQ_SEND_FAILED) {
+ lio_add_to_request_list(iq, st.index, buf, reqtype);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, bytes_sent,
+ datasize);
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_posted, 1);
+
+ lio_ring_doorbell(lio_dev, iq);
+ } else {
+ LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, instr_dropped, 1);
+ }
+
+ rte_spinlock_unlock(&iq->post_lock);
+
+ return st.status;
+}
+
+void
+lio_prepare_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc, uint8_t opcode,
+ uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0,
+ uint64_t ossp1)
+{
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ RTE_ASSERT(opcode <= 15);
+ RTE_ASSERT(subcode <= 127);
+
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = lio_dev->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = OCTEON_ATOMIC_TAG;
+ pki_ih3->qpg = lio_dev->instr_queue[sc->iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = lio_dev->pcie_port;
+ rdp->rlen = sc->rdatasize;
+ irh->rflag = 1;
+ /* PKI IH3 */
+ ih3->fsz = OCTEON_SOFT_CMD_RESP_IH3;
+ } else {
+ irh->rflag = 0;
+ /* PKI IH3 */
+ ih3->fsz = OCTEON_PCI_CMD_O3;
+ }
+}
+
+int
+lio_send_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc)
+{
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ uint32_t len = 0;
+
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ RTE_ASSERT(sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ RTE_ASSERT(sc->dmarptr);
+ RTE_ASSERT(sc->status_word != NULL);
+ *sc->status_word = LIO_COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+
+ len = (uint32_t)ih3->dlengsz;
+
+ if (sc->wait_time)
+ sc->timeout = lio_uptime + sc->wait_time;
+
+ return lio_send_command(lio_dev, sc->iq_no, &sc->cmd, sc, len,
+ LIO_REQTYPE_SOFT_COMMAND);
+}
+
+int
+lio_setup_sc_buffer_pool(struct lio_device *lio_dev)
+{
+ char sc_pool_name[RTE_MEMPOOL_NAMESIZE];
+ uint16_t buf_size;
+
+ buf_size = LIO_SOFT_COMMAND_BUFFER_SIZE + RTE_PKTMBUF_HEADROOM;
+ snprintf(sc_pool_name, sizeof(sc_pool_name),
+ "lio_sc_pool_%u", lio_dev->port_id);
+ lio_dev->sc_buf_pool = rte_pktmbuf_pool_create(sc_pool_name,
+ LIO_MAX_SOFT_COMMAND_BUFFERS,
+ 0, 0, buf_size, SOCKET_ID_ANY);
+ if (lio_dev->sc_buf_pool == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void
+lio_free_sc_buffer_pool(struct lio_device *lio_dev)
+{
+ rte_mempool_free(lio_dev->sc_buf_pool);
+}
+
+struct lio_soft_command *
+lio_alloc_soft_command(struct lio_device *lio_dev, uint32_t datasize,
+ uint32_t rdatasize, uint32_t ctxsize)
+{
+ uint32_t offset = sizeof(struct lio_soft_command);
+ struct lio_soft_command *sc;
+ struct rte_mbuf *m;
+ uint64_t dma_addr;
+
+ RTE_ASSERT((offset + datasize + rdatasize + ctxsize) <=
+ LIO_SOFT_COMMAND_BUFFER_SIZE);
+
+ m = rte_pktmbuf_alloc(lio_dev->sc_buf_pool);
+ if (m == NULL) {
+ lio_dev_err(lio_dev, "Cannot allocate mbuf for sc\n");
+ return NULL;
+ }
+
+ /* set rte_mbuf data size and there is only 1 segment */
+ m->pkt_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
+ m->data_len = LIO_SOFT_COMMAND_BUFFER_SIZE;
+
+ /* use rte_mbuf buffer for soft command */
+ sc = rte_pktmbuf_mtod(m, struct lio_soft_command *);
+ memset(sc, 0, LIO_SOFT_COMMAND_BUFFER_SIZE);
+ sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE;
+ sc->dma_addr = rte_mbuf_data_dma_addr(m);
+ sc->mbuf = m;
+
+ dma_addr = sc->dma_addr;
+
+ if (ctxsize) {
+ sc->ctxptr = (uint8_t *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
+
+ if (datasize) {
+ sc->virtdptr = (uint8_t *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ RTE_ASSERT(rdatasize >= 16);
+ sc->virtrptr = (uint8_t *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) +
+ rdatasize - 8);
+ }
+
+ return sc;
+}
+
+void
+lio_free_soft_command(struct lio_soft_command *sc)
+{
+ rte_pktmbuf_free(sc->mbuf);
+}
+
+void
+lio_setup_response_list(struct lio_device *lio_dev)
+{
+ STAILQ_INIT(&lio_dev->response_list.head);
+ rte_spinlock_init(&lio_dev->response_list.lock);
+ rte_atomic64_set(&lio_dev->response_list.pending_req_count, 0);
+}
+
+int
+lio_process_ordered_list(struct lio_device *lio_dev)
+{
+ int resp_to_process = LIO_MAX_ORD_REQS_TO_PROCESS;
+ struct lio_response_list *ordered_sc_list;
+ struct lio_soft_command *sc;
+ int request_complete = 0;
+ uint64_t status64;
+ uint32_t status;
+
+ ordered_sc_list = &lio_dev->response_list;
+
+ do {
+ rte_spinlock_lock(&ordered_sc_list->lock);
+
+ if (STAILQ_EMPTY(&ordered_sc_list->head)) {
+ /* ordered_sc_list is empty; there is
+ * nothing to process
+ */
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ return -1;
+ }
+
+ sc = LIO_STQUEUE_FIRST_ENTRY(&ordered_sc_list->head,
+ struct lio_soft_command, node);
+
+ status = LIO_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+ * to where rptr is pointing to
+ */
+ status64 = *sc->status_word;
+
+ if (status64 != LIO_COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 32-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
+ if ((status64 & 0xff) != 0xff) {
+ lio_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ /* retrieve 16-bit firmware status */
+ status = (uint32_t)(status64 &
+ 0xffffULL);
+ if (status) {
+ status =
+ LIO_FIRMWARE_STATUS_CODE(
+ status);
+ } else {
+ /* i.e. no error */
+ status = LIO_REQUEST_DONE;
+ }
+ }
+ }
+ } else if ((sc->timeout && lio_check_timeout(lio_uptime,
+ sc->timeout))) {
+ lio_dev_err(lio_dev,
+ "cmd failed, timeout (%ld, %ld)\n",
+ (long)lio_uptime, (long)sc->timeout);
+ status = LIO_REQUEST_TIMEOUT;
+ }
+
+ if (status != LIO_REQUEST_PENDING) {
+ /* we have received a response or we have timed out.
+ * remove node from linked list
+ */
+ STAILQ_REMOVE(&ordered_sc_list->head,
+ &sc->node, lio_stailq_node, entries);
+ rte_atomic64_dec(
+ &lio_dev->response_list.pending_req_count);
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+
+ if (sc->callback)
+ sc->callback(status, sc->callback_arg);
+
+ request_complete++;
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ rte_spinlock_unlock(&ordered_sc_list->lock);
+ }
+
+ /* If we hit the Max Ordered requests to process every loop,
+ * we quit and let this function be invoked the next time
+ * the poll thread runs to process the remaining requests.
+ * This function can take up the entire CPU if there is
+ * no upper limit to the requests processed.
+ */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
+
+static inline struct lio_stailq_node *
+list_delete_first_node(struct lio_stailq_head *head)
+{
+ struct lio_stailq_node *node;
+
+ if (STAILQ_EMPTY(head))
+ node = NULL;
+ else
+ node = STAILQ_FIRST(head);
+
+ if (node)
+ STAILQ_REMOVE(head, node, lio_stailq_node, entries);
+
+ return node;
+}
+
+void
+lio_delete_sglist(struct lio_instr_queue *txq)
+{
+ struct lio_device *lio_dev = txq->lio_dev;
+ int iq_no = txq->q_index;
+ struct lio_gather *g;
+
+ if (lio_dev->glist_head == NULL)
+ return;
+
+ do {
+ g = (struct lio_gather *)list_delete_first_node(
+ &lio_dev->glist_head[iq_no]);
+ if (g) {
+ if (g->sg)
+ rte_free(
+ (void *)((unsigned long)g->sg - g->adjust));
+ rte_free(g);
+ }
+ } while (g);
+}
+
+/**
+ * \brief Setup gather lists
+ * @param lio_dev - pointer to the lio device structure
+ * @param iq_no - input queue number
+ * @param fw_mapped_iq - firmware-mapped queue index (used for cleanup)
+ * @param num_descs - number of gather lists to allocate
+ * @param socket_id - NUMA socket for the allocations
+ */
+int
+lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
+ int fw_mapped_iq, int num_descs, unsigned int socket_id)
+{
+ struct lio_gather *g;
+ int i;
+
+ rte_spinlock_init(&lio_dev->glist_lock[iq_no]);
+
+ STAILQ_INIT(&lio_dev->glist_head[iq_no]);
+
+ for (i = 0; i < num_descs; i++) {
+ g = rte_zmalloc_socket(NULL, sizeof(*g), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (g == NULL) {
+ lio_dev_err(lio_dev,
+ "lio_gather memory allocation failed for qno %d\n",
+ iq_no);
+ break;
+ }
+
+ g->sg_size =
+ ((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE);
+
+ g->sg = rte_zmalloc_socket(NULL, g->sg_size + 8,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (g->sg == NULL) {
+ lio_dev_err(lio_dev,
+ "sg list memory allocation failed for qno %d\n",
+ iq_no);
+ rte_free(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit boundary */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg =
+ (struct lio_sg_entry *)((unsigned long)g->sg +
+ g->adjust);
+ }
+
+ STAILQ_INSERT_TAIL(&lio_dev->glist_head[iq_no], &g->list,
+ entries);
+ }
+
+ if (i != num_descs) {
+ lio_delete_sglist(lio_dev->instr_queue[fw_mapped_iq]);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no)
+{
+ lio_delete_instr_queue(lio_dev, iq_no);
+ rte_free(lio_dev->instr_queue[iq_no]);
+ lio_dev->instr_queue[iq_no] = NULL;
+ lio_dev->num_iqs--;
+}
+
+static inline uint32_t
+lio_iq_get_available(struct lio_device *lio_dev, uint32_t q_no)
+{
+ return ((lio_dev->instr_queue[q_no]->max_count - 1) -
+ (uint32_t)rte_atomic64_read(
+ &lio_dev->instr_queue[q_no]->instr_pending));
+}
+
+static inline int
+lio_iq_is_full(struct lio_device *lio_dev, uint32_t q_no)
+{
+ return ((uint32_t)rte_atomic64_read(
+ &lio_dev->instr_queue[q_no]->instr_pending) >=
+ (lio_dev->instr_queue[q_no]->max_count - 2));
+}
+
+static int
+lio_dev_cleanup_iq(struct lio_device *lio_dev, int iq_no)
+{
+ struct lio_instr_queue *iq = lio_dev->instr_queue[iq_no];
+ uint32_t count = 10000;
+
+ while ((lio_iq_get_available(lio_dev, iq_no) < LIO_FLUSH_WM(iq)) &&
+ --count)
+ lio_flush_iq(lio_dev, iq);
+
+ return count ? 0 : 1;
+}
+
+static void
+lio_ctrl_cmd_callback(uint32_t status __rte_unused, void *sc_ptr)
+{
+ struct lio_soft_command *sc = sc_ptr;
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+ struct lio_ctrl_pkt *ctrl_pkt;
+
+ ctrl_pkt = (struct lio_ctrl_pkt *)sc->ctxptr;
+ ctrl_cmd = ctrl_pkt->ctrl_cmd;
+ ctrl_cmd->cond = 1;
+
+ lio_free_soft_command(sc);
+}
+
+static inline struct lio_soft_command *
+lio_alloc_ctrl_pkt_sc(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ uint32_t uddsize, datasize;
+ uint32_t rdatasize;
+ uint8_t *data;
+
+ uddsize = (uint32_t)(ctrl_pkt->ncmd.s.more * 8);
+
+ datasize = OCTEON_CMD_SIZE + uddsize;
+ rdatasize = (ctrl_pkt->wait_time) ? 16 : 0;
+
+ sc = lio_alloc_soft_command(lio_dev, datasize,
+ rdatasize, sizeof(struct lio_ctrl_pkt));
+ if (sc == NULL)
+ return NULL;
+
+ rte_memcpy(sc->ctxptr, ctrl_pkt, sizeof(struct lio_ctrl_pkt));
+
+ data = (uint8_t *)sc->virtdptr;
+
+ rte_memcpy(data, &ctrl_pkt->ncmd, OCTEON_CMD_SIZE);
+
+ lio_swap_8B_data((uint64_t *)data, OCTEON_CMD_SIZE >> 3);
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ rte_memcpy(data + OCTEON_CMD_SIZE, ctrl_pkt->udd, uddsize);
+ }
+
+ sc->iq_no = (uint32_t)ctrl_pkt->iq_no;
+
+ lio_prepare_soft_command(lio_dev, sc,
+ LIO_OPCODE, LIO_OPCODE_CMD,
+ 0, 0, 0);
+
+ sc->callback = lio_ctrl_cmd_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = ctrl_pkt->wait_time;
+
+ return sc;
+}
+
+int
+lio_send_ctrl_pkt(struct lio_device *lio_dev, struct lio_ctrl_pkt *ctrl_pkt)
+{
+ struct lio_soft_command *sc = NULL;
+ int retval;
+
+ sc = lio_alloc_ctrl_pkt_sc(lio_dev, ctrl_pkt);
+ if (sc == NULL) {
+ lio_dev_err(lio_dev, "soft command allocation failed\n");
+ return -1;
+ }
+
+ retval = lio_send_soft_command(lio_dev, sc);
+ if (retval == LIO_IQ_SEND_FAILED) {
+ lio_free_soft_command(sc);
+ lio_dev_err(lio_dev, "Port: %d soft command: %d send failed status: %x\n",
+ lio_dev->port_id, ctrl_pkt->ncmd.s.cmd, retval);
+ return -1;
+ }
+
+ return retval;
+}
+
+/** Send data packet to the device
+ * @param lio_dev - lio device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns LIO_IQ_SEND_FAILED if it failed to add to the input queue,
+ * LIO_IQ_SEND_STOP if the queue should be stopped, and LIO_IQ_SEND_OK if
+ * it was sent okay.
+ */
+static inline int
+lio_send_data_pkt(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
+{
+ return lio_send_command(lio_dev, ndata->q_no, &ndata->cmd,
+ ndata->buf, ndata->datasize, ndata->reqtype);
+}
+
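+/* Transmit burst routine, expected to be installed as the PMD's
+ * eth_dev->tx_pkt_burst callback from lio_ethdev.c.
+ */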
+uint16_t
+lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts, uint16_t nb_pkts)
+{
+ struct lio_instr_queue *txq = tx_queue;
+ union lio_cmd_setup cmdsetup;
+ struct lio_device *lio_dev;
+ struct lio_iq_stats *stats;
+ struct lio_data_pkt ndata;
+ int i, processed = 0;
+ struct rte_mbuf *m;
+ uint32_t tag = 0;
+ int status = 0;
+ int iq_no;
+
+ lio_dev = txq->lio_dev;
+ iq_no = txq->txpciq.s.q_no;
+ stats = &lio_dev->instr_queue[iq_no]->stats;
+
+ if (!lio_dev->intf_open || !lio_dev->linfo.link.s.link_up) {
+ PMD_TX_LOG(lio_dev, ERR, "Transmit failed link_status : %d\n",
+ lio_dev->linfo.link.s.link_up);
+ goto xmit_failed;
+ }
+
+ lio_dev_cleanup_iq(lio_dev, iq_no);
+
+ for (i = 0; i < nb_pkts; i++) {
+ uint32_t pkt_len = 0;
+
+ m = pkts[i];
+
+ /* Prepare the attributes for the data to be passed to BASE. */
+ memset(&ndata, 0, sizeof(struct lio_data_pkt));
+
+ ndata.buf = m;
+
+ ndata.q_no = iq_no;
+ if (lio_iq_is_full(lio_dev, ndata.q_no)) {
+ stats->tx_iq_busy++;
+ if (lio_dev_cleanup_iq(lio_dev, iq_no)) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ break;
+ }
+ }
+
+ cmdsetup.cmd_setup64 = 0;
+ cmdsetup.s.iq_no = iq_no;
+
+ /* check checksum offload flags to form cmd */
+ if (m->ol_flags & PKT_TX_IP_CKSUM)
+ cmdsetup.s.ip_csum = 1;
+
+ if (m->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cmdsetup.s.tnl_csum = 1;
+ else if ((m->ol_flags & PKT_TX_TCP_CKSUM) ||
+ (m->ol_flags & PKT_TX_UDP_CKSUM))
+ cmdsetup.s.transport_csum = 1;
+
+ if (m->nb_segs == 1) {
+ pkt_len = rte_pktmbuf_data_len(m);
+ cmdsetup.s.u.datasize = pkt_len;
+ lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
+ &cmdsetup, tag);
+ ndata.cmd.cmd3.dptr = rte_mbuf_data_dma_addr(m);
+ ndata.reqtype = LIO_REQTYPE_NORESP_NET;
+ } else {
+ struct lio_buf_free_info *finfo;
+ struct lio_gather *g;
+ phys_addr_t phyaddr;
+ int i, frags;
+
+ finfo = (struct lio_buf_free_info *)rte_malloc(NULL,
+ sizeof(*finfo), 0);
+ if (finfo == NULL) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "free buffer alloc failed\n");
+ goto xmit_failed;
+ }
+
+ rte_spinlock_lock(&lio_dev->glist_lock[iq_no]);
+ g = (struct lio_gather *)list_delete_first_node(
+ &lio_dev->glist_head[iq_no]);
+ rte_spinlock_unlock(&lio_dev->glist_lock[iq_no]);
+ if (g == NULL) {
+ PMD_TX_LOG(lio_dev, ERR,
+ "Transmit scatter gather: glist null!\n");
+ rte_free(finfo);
+ goto xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = m->nb_segs;
+ lio_prepare_pci_cmd(lio_dev, &ndata.cmd,
+ &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+ g->sg[0].ptr[0] = rte_mbuf_data_dma_addr(m);
+ lio_add_sg_size(&g->sg[0], m->data_len, 0);
+ pkt_len = m->data_len;
+ finfo->mbuf = m;
+
+ /* First seg taken care of above */
+ frags = m->nb_segs - 1;
+ i = 1;
+ m = m->next;
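+ /* Each lio_sg_entry holds four buffer pointers, so (i >> 2)
+ * selects the gather entry and (i & 3) the slot within it
+ * (per the sg entry layout assumed from lio_struct.h).
+ */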
+ while (frags--) {
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ rte_mbuf_data_dma_addr(m);
+ lio_add_sg_size(&g->sg[(i >> 2)],
+ m->data_len, (i & 3));
+ pkt_len += m->data_len;
+ i++;
+ m = m->next;
+ }
+
+ phyaddr = rte_mem_virt2phy(g->sg);
+ if (phyaddr == RTE_BAD_PHYS_ADDR) {
+ PMD_TX_LOG(lio_dev, ERR, "bad phys addr\n");
+ goto xmit_failed;
+ }
+
+ ndata.cmd.cmd3.dptr = phyaddr;
+ ndata.reqtype = LIO_REQTYPE_NORESP_NET_SG;
+
+ finfo->g = g;
+ finfo->lio_dev = lio_dev;
+ finfo->iq_no = (uint64_t)iq_no;
+ ndata.buf = finfo;
+ }
+
+ ndata.datasize = pkt_len;
+
+ status = lio_send_data_pkt(lio_dev, &ndata);
+
+ if (unlikely(status == LIO_IQ_SEND_FAILED)) {
+ PMD_TX_LOG(lio_dev, ERR, "send failed\n");
+ break;
+ }
+
+ if (unlikely(status == LIO_IQ_SEND_STOP)) {
+ PMD_TX_LOG(lio_dev, DEBUG, "iq full\n");
+ /* create space as iq is full */
+ lio_dev_cleanup_iq(lio_dev, iq_no);
+ }
+
+ stats->tx_done++;
+ stats->tx_tot_bytes += pkt_len;
+ processed++;
+ }
+
+xmit_failed:
+ stats->tx_dropped += (nb_pkts - processed);
+
+ return processed;
+}
+
+void
+lio_dev_clear_queues(struct rte_eth_dev *eth_dev)
+{
+ struct lio_instr_queue *txq;
+ struct lio_droq *rxq;
+ uint16_t i;
+
+ for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+ txq = eth_dev->data->tx_queues[i];
+ if (txq != NULL) {
+ lio_dev_tx_queue_release(txq);
+ eth_dev->data->tx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+ rxq = eth_dev->data->rx_queues[i];
+ if (rxq != NULL) {
+ lio_dev_rx_queue_release(rxq);
+ eth_dev->data->rx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/net/liquidio/lio_rxtx.h b/drivers/net/liquidio/lio_rxtx.h
new file mode 100644
index 00000000..85685dc7
--- /dev/null
+++ b/drivers/net/liquidio/lio_rxtx.h
@@ -0,0 +1,769 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_RXTX_H_
+#define _LIO_RXTX_H_
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_spinlock.h>
+#include <rte_memory.h>
+
+#include "lio_struct.h"
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
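+/* container_of-style helper: recover the structure holding the first queue
+ * node from its embedded stailq member 'elem'.
+ */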
+#define LIO_STQUEUE_FIRST_ENTRY(ptr, type, elem) \
+ (type *)((char *)((ptr)->stqh_first) - offsetof(type, elem))
+
+#define lio_check_timeout(cur_time, chk_time) ((cur_time) > (chk_time))
+
+#define lio_uptime \
+ (size_t)(rte_get_timer_cycles() / rte_get_timer_hz())
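+/* lio_uptime evaluates to whole seconds since boot (timer cycles divided by
+ * the timer frequency), so timeouts built from it and checked with
+ * lio_check_timeout() are expressed in seconds.
+ */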
+
+/** Descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of a lio_droq_info structure.
+ * The device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct lio_droq_desc {
+ /** The buffer pointer */
+ uint64_t buffer_ptr;
+
+ /** The Info pointer */
+ uint64_t info_ptr;
+};
+
+#define LIO_DROQ_DESC_SIZE (sizeof(struct lio_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct lio_droq_info {
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+
+ /** The Length of the packet. */
+ uint64_t length;
+};
+
+#define LIO_DROQ_INFO_SIZE (sizeof(struct lio_droq_info))
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+ */
+struct lio_recv_buffer {
+ /** Packet buffer, including meta data. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ uint8_t *data;
+
+};
+
+#define LIO_DROQ_RECVBUF_SIZE (sizeof(struct lio_recv_buffer))
+
+#define LIO_DROQ_SIZE (sizeof(struct lio_droq))
+
+#define LIO_IQ_SEND_OK 0
+#define LIO_IQ_SEND_STOP 1
+#define LIO_IQ_SEND_FAILED -1
+
+/* conditions */
+#define LIO_REQTYPE_NONE 0
+#define LIO_REQTYPE_NORESP_NET 1
+#define LIO_REQTYPE_NORESP_NET_SG 2
+#define LIO_REQTYPE_SOFT_COMMAND 3
+
+struct lio_request_list {
+ uint32_t reqtype;
+ void *buf;
+};
+
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+struct lio_instr3_64B {
+ /** Pointer where the input data is available. */
+ uint64_t dptr;
+
+ /** Instruction Header. */
+ uint64_t ih3;
+
+ /** Instruction Header. */
+ uint64_t pki_ih3;
+
+ /** Input Request Header. */
+ uint64_t irh;
+
+ /** opcode/subcode specific parameters */
+ uint64_t ossp[2];
+
+ /** Return Data Parameters */
+ uint64_t rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ uint64_t rptr;
+
+};
+
+union lio_instr_64B {
+ struct lio_instr3_64B cmd3;
+};
+
+/** The size of each buffer in soft command buffer pool */
+#define LIO_SOFT_COMMAND_BUFFER_SIZE 1536
+
+/** Maximum number of buffers to allocate into soft command buffer pool */
+#define LIO_MAX_SOFT_COMMAND_BUFFERS 255
+
+struct lio_soft_command {
+ /** Soft command buffer info. */
+ struct lio_stailq_node node;
+ uint64_t dma_addr;
+ uint32_t size;
+
+ /** Command and return status */
+ union lio_instr_64B cmd;
+
+#define LIO_COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ uint64_t *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ uint64_t dmadptr;
+ uint32_t datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ uint64_t dmarptr;
+ uint32_t rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ uint32_t ctxsize;
+
+ /** Time out and callback */
+ size_t wait_time;
+ size_t timeout;
+ uint32_t iq_no;
+ void (*callback)(uint32_t, void *);
+ void *callback_arg;
+ struct rte_mbuf *mbuf;
+};
+
+struct lio_iq_post_status {
+ int status;
+ int index;
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCTEON_CMD |
+ * --------------- 64
+ * | Addtl 8-BData |
+ * | |
+ * ---------------
+ */
+
+union octeon_cmd {
+ uint64_t cmd64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t cmd : 5;
+
+ uint64_t more : 6; /* How many udd words follow the command */
+
+ uint64_t reserved : 29;
+
+ uint64_t param1 : 16;
+
+ uint64_t param2 : 8;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+ uint64_t param2 : 8;
+
+ uint64_t param1 : 16;
+
+ uint64_t reserved : 29;
+
+ uint64_t more : 6;
+
+ uint64_t cmd : 5;
+
+#endif
+ } s;
+};
+
+#define OCTEON_CMD_SIZE (sizeof(union octeon_cmd))
+
+/* Maximum number of 8-byte words can be
+ * sent in a NIC control message.
+ */
+#define LIO_MAX_NCTRL_UDD 32
+
+/* Structure of control information passed by driver to the BASE
+ * layer when sending control commands to Octeon device software.
+ */
+struct lio_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octeon_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ uint64_t dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ uint64_t dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ uint64_t udd[LIO_MAX_NCTRL_UDD];
+
+ /** Input queue to use to send this command. */
+ uint64_t iq_no;
+
+ /** Time to wait for Octeon software to respond to this control command.
+ * If wait_time is 0, BASE assumes no response is expected.
+ */
+ size_t wait_time;
+
+ struct lio_dev_ctrl_cmd *ctrl_cmd;
+};
+
+/** Structure of data information passed by driver to the BASE
+ * layer when forwarding data to Octeon device software.
+ */
+struct lio_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * BASE layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ uint32_t reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ uint32_t datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ union lio_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ uint32_t q_no;
+};
+
+/** Structure passed by driver to BASE layer to prepare a command to send
+ * network data to Octeon.
+ */
+union lio_cmd_setup {
+ struct {
+ uint32_t iq_no : 8;
+ uint32_t gather : 1;
+ uint32_t timestamp : 1;
+ uint32_t ip_csum : 1;
+ uint32_t transport_csum : 1;
+ uint32_t tnl_csum : 1;
+ uint32_t rsvd : 19;
+
+ union {
+ uint32_t datasize;
+ uint32_t gatherptrs;
+ } u;
+ } s;
+
+ uint64_t cmd_setup64;
+};
+
+/* Instruction Header */
+struct octeon_instr_ih3 {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** Reserved3 */
+ uint64_t reserved3 : 1;
+
+ /** Gather indicator 1=gather*/
+ uint64_t gather : 1;
+
+ /** Data length OR no. of entries in gather list */
+ uint64_t dlengsz : 14;
+
+ /** Front Data size */
+ uint64_t fsz : 6;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 4;
+
+ /** PKI port kind - PKIND */
+ uint64_t pkind : 6;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 32;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /** Reserved1 */
+ uint64_t reserved1 : 32;
+
+ /** PKI port kind - PKIND */
+ uint64_t pkind : 6;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 4;
+
+ /** Front Data size */
+ uint64_t fsz : 6;
+
+ /** Data length OR no. of entries in gather list */
+ uint64_t dlengsz : 14;
+
+ /** Gather indicator 1=gather*/
+ uint64_t gather : 1;
+
+ /** Reserved3 */
+ uint64_t reserved3 : 1;
+
+#endif
+};
+
+/* PKI Instruction Header(PKI IH) */
+struct octeon_instr_pki_ih3 {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** Wider bit */
+ uint64_t w : 1;
+
+ /** Raw mode indicator 1 = RAW */
+ uint64_t raw : 1;
+
+ /** Use Tag */
+ uint64_t utag : 1;
+
+ /** Use QPG */
+ uint64_t uqpg : 1;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 1;
+
+ /** Parse Mode */
+ uint64_t pm : 3;
+
+ /** Skip Length */
+ uint64_t sl : 8;
+
+ /** Use Tag Type */
+ uint64_t utt : 1;
+
+ /** Tag type */
+ uint64_t tagtype : 2;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 2;
+
+ /** QPG Value */
+ uint64_t qpg : 11;
+
+ /** Tag Value */
+ uint64_t tag : 32;
+
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+
+ /** Tag Value */
+ uint64_t tag : 32;
+
+ /** QPG Value */
+ uint64_t qpg : 11;
+
+ /** Reserved1 */
+ uint64_t reserved1 : 2;
+
+ /** Tag type */
+ uint64_t tagtype : 2;
+
+ /** Use Tag Type */
+ uint64_t utt : 1;
+
+ /** Skip Length */
+ uint64_t sl : 8;
+
+ /** Parse Mode */
+ uint64_t pm : 3;
+
+ /** Reserved2 */
+ uint64_t reserved2 : 1;
+
+ /** Use QPG */
+ uint64_t uqpg : 1;
+
+ /** Use Tag */
+ uint64_t utag : 1;
+
+ /** Raw mode indicator 1 = RAW */
+ uint64_t raw : 1;
+
+ /** Wider bit */
+ uint64_t w : 1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t opcode : 4;
+ uint64_t rflag : 1;
+ uint64_t subcode : 7;
+ uint64_t vlan : 12;
+ uint64_t priority : 3;
+ uint64_t reserved : 5;
+ uint64_t ossp : 32; /* opcode/subcode specific parameters */
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t ossp : 32; /* opcode/subcode specific parameters */
+ uint64_t reserved : 5;
+ uint64_t priority : 3;
+ uint64_t vlan : 12;
+ uint64_t subcode : 7;
+ uint64_t rflag : 1;
+ uint64_t opcode : 4;
+#endif
+};
+
+/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+#define OCTEON_SOFT_CMD_RESP_IH3 (40 + 8)
+/* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
+#define OCTEON_PCI_CMD_O3 (24 + 8)
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t reserved : 49;
+ uint64_t pcie_port : 3;
+ uint64_t rlen : 12;
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint64_t rlen : 12;
+ uint64_t pcie_port : 3;
+ uint64_t reserved : 49;
+#endif
+};
+
+union octeon_packet_params {
+ uint32_t pkt_params32;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint32_t reserved : 24;
+ uint32_t ip_csum : 1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ uint32_t transport_csum : 1;
+ /* Find tunnel, and perform transport csum. */
+ uint32_t tnl_csum : 1;
+ uint32_t tsflag : 1; /* Timestamp this packet */
+ uint32_t ipsec_ops : 4; /* IPsec operation */
+#else
+ uint32_t ipsec_ops : 4;
+ uint32_t tsflag : 1;
+ uint32_t tnl_csum : 1;
+ uint32_t transport_csum : 1;
+ uint32_t ip_csum : 1;
+ uint32_t reserved : 7;
+#endif
+ } s;
+};
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param lio_dev - pointer to the lio device structure
+ * @param cmd - pointer to the instruction to be filled in
+ * @param setup - pointer to the setup structure
+ * @param tag - tag value for the command; 0 selects the default LIO_DATA(port) tag
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+lio_prepare_pci_cmd(struct lio_device *lio_dev,
+ union lio_instr_64B *cmd,
+ union lio_cmd_setup *setup,
+ uint32_t tag)
+{
+ union octeon_packet_params packet_params;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ int port;
+
+ memset(cmd, 0, sizeof(union lio_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+ /* assume that rflag is cleared, so the front data will only have
+ * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ */
+ ih3->pkind = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /* PKI IH */
+ ih3->fsz = OCTEON_PCI_CMD_O3;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 0;
+ pki_ih3->utag = 0;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = OCTEON_ORDERED_TAG;
+ pki_ih3->qpg = lio_dev->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x0; /* parse from L2 */
+ pki_ih3->sl = 32; /* sl will be sizeof(pki_ih3) + irh + ossp0 + ossp1*/
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = LIO_OPCODE;
+ irh->subcode = LIO_OPCODE_NW_DATA;
+
+ packet_params.pkt_params32 = 0;
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.pkt_params32;
+}
+
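
For context, a minimal usage sketch of the helper above; it is not part of the patch, and the wrapper function, queue number, and sizes below are hypothetical:

static void
example_build_tx_cmd(struct lio_device *lio_dev, struct lio_data_pkt *ndata)
{
        union lio_cmd_setup cmdsetup;

        /* Describe a non-gather packet with IP checksum offload requested. */
        memset(&cmdsetup, 0, sizeof(union lio_cmd_setup));
        cmdsetup.s.iq_no = 0;            /* hypothetical input queue */
        cmdsetup.s.ip_csum = 1;          /* request IP header checksum */
        cmdsetup.s.u.datasize = 128;     /* hypothetical payload size */

        /* A tag of 0 makes the helper fall back to LIO_DATA(port). */
        lio_prepare_pci_cmd(lio_dev, &ndata->cmd, &cmdsetup, 0);
}
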
+int lio_setup_sc_buffer_pool(struct lio_device *lio_dev);
+void lio_free_sc_buffer_pool(struct lio_device *lio_dev);
+
+struct lio_soft_command *
+lio_alloc_soft_command(struct lio_device *lio_dev,
+ uint32_t datasize, uint32_t rdatasize,
+ uint32_t ctxsize);
+void lio_prepare_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc,
+ uint8_t opcode, uint8_t subcode,
+ uint32_t irh_ossp, uint64_t ossp0,
+ uint64_t ossp1);
+int lio_send_soft_command(struct lio_device *lio_dev,
+ struct lio_soft_command *sc);
+void lio_free_soft_command(struct lio_soft_command *sc);
+
+/** Send control packet to the device
+ * @param lio_dev - lio device pointer
+ * @param ctrl_pkt - control structure with command, timeout, and callback info
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
+ * queue should be stopped, and LIO_IQ_SEND_OK if it was sent okay.
+ */
+int lio_send_ctrl_pkt(struct lio_device *lio_dev,
+ struct lio_ctrl_pkt *ctrl_pkt);
+
+/** Maximum number of ordered requests to process in each invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function could run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define LIO_MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ * 31 16 15 0
+ * ----------------------------
+ * | | |
+ * ----------------------------
+ * Error codes are 32 bits wide. The upper 16 bits, called the Major Error
+ * Number, identify the group to which the error code belongs. The lower
+ * 16 bits, called the Minor Error Number, carry the actual code.
+ *
+ * So error codes are (MAJOR_NUMBER << 16) | MINOR_NUMBER.
+ */
+/** Status for a request.
+ * If the request is successfully queued, the driver will return
+ * a LIO_REQUEST_PENDING status. LIO_REQUEST_TIMEOUT is returned by
+ * the driver if the response for a request failed to arrive before the
+ * time-out period or if the request processing got interrupted due to
+ * a signal.
+ */
+enum {
+ /** A value of 0x00000000 indicates no error i.e. success */
+ LIO_REQUEST_DONE = 0x00000000,
+ /** (Major number: 0x0000; Minor Number: 0x0001) */
+ LIO_REQUEST_PENDING = 0x00000001,
+ LIO_REQUEST_TIMEOUT = 0x00000003,
+
+};
+
+/*------ Error codes used by firmware (bits 15..0 are set by firmware) */
+#define LIO_FIRMWARE_MAJOR_ERROR_CODE 0x0001
+#define LIO_FIRMWARE_STATUS_CODE(status) \
+ ((LIO_FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
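
To make the encoding above concrete, a small worked example (the minor code 0x0004 is made up):

uint32_t status = LIO_FIRMWARE_STATUS_CODE(0x0004);
/* status == 0x00010004: major 0x0001 (firmware group), minor 0x0004 */
uint16_t major = status >> 16;     /* 0x0001 */
uint16_t minor = status & 0xffff;  /* 0x0004 */
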
+/** Initialize the response list used to track pending instruction responses.
+ * @param lio_dev - the lio device structure.
+ */
+void lio_setup_response_list(struct lio_device *lio_dev);
+
+/** Check the status of the first entry in the ordered list. If the instruction
+ * at that entry has finished processing or has timed out, the entry is cleaned.
+ * @param lio_dev - the lio device structure.
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct lio_device *lio_dev);
+
+#define LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, iq_no, field, count) \
+ (((lio_dev)->instr_queue[iq_no]->stats.field) += count)
+
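A quick, hypothetical illustration of the stats helper above (the queue number and byte count are made up; the field names come from struct lio_iq_stats):

/* Account one transmitted packet and its length on input queue 0. */
LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, 0, tx_done, 1);
LIO_INCR_INSTRQUEUE_PKT_COUNT(lio_dev, 0, tx_tot_bytes, 1500);
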
+static inline void
+lio_swap_8B_data(uint64_t *data, uint32_t blocks)
+{
+ while (blocks) {
+ *data = rte_cpu_to_be_64(*data);
+ blocks--;
+ data++;
+ }
+}
+
+static inline uint64_t
+lio_map_ring(void *buf)
+{
+ phys_addr_t dma_addr;
+
+ dma_addr = rte_mbuf_data_dma_addr_default(((struct rte_mbuf *)buf));
+
+ return (uint64_t)dma_addr;
+}
+
+static inline uint64_t
+lio_map_ring_info(struct lio_droq *droq, uint32_t i)
+{
+ phys_addr_t dma_addr;
+
+ dma_addr = droq->info_list_dma + (i * LIO_DROQ_INFO_SIZE);
+
+ return (uint64_t)dma_addr;
+}
+
+static inline int
+lio_opcode_slow_path(union octeon_rh *rh)
+{
+ uint16_t subcode1, subcode2;
+
+ subcode1 = LIO_OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode);
+ subcode2 = LIO_OPCODE_SUBCODE(LIO_OPCODE, LIO_OPCODE_NW_DATA);
+
+ return subcode2 != subcode1;
+}
+
+static inline void
+lio_add_sg_size(struct lio_sg_entry *sg_entry,
+ uint16_t size, uint32_t pos)
+{
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ sg_entry->u.size[pos] = size;
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
+
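To show how the scatter-gather helpers fit together, a hedged sketch; the wrapper function is hypothetical, and real code would walk an mbuf chain rather than take two fixed segments:

static void
example_fill_sg(struct rte_mbuf *m0, struct rte_mbuf *m1)
{
        struct lio_sg_entry sg;

        memset(&sg, 0, sizeof(sg));
        /* Record each segment's length in the size[] half of the entry. */
        lio_add_sg_size(&sg, rte_pktmbuf_data_len(m0), 0);
        lio_add_sg_size(&sg, rte_pktmbuf_data_len(m1), 1);
        /* And its DMA address in the matching ptr[] slot. */
        sg.ptr[0] = lio_map_ring(m0);
        sg.ptr[1] = lio_map_ring(m1);
}
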
+/* Increment a ring index.
+ * The index is incremented by count; if the sum reaches or exceeds
+ * max, the index is wrapped around to the start.
+ */
+static inline uint32_t
+lio_incr_index(uint32_t index, uint32_t count, uint32_t max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
+
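A few worked values for the wrap-around behaviour above (the ring size of 1024 is arbitrary):

/* With a 1024-entry ring: */
lio_incr_index(100, 4, 1024);   /* -> 104: no wrap               */
lio_incr_index(1020, 4, 1024);  /* -> 0:   sum equals max, wraps */
lio_incr_index(1022, 4, 1024);  /* -> 2:   wraps past the end    */
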
+int lio_setup_droq(struct lio_device *lio_dev, int q_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool,
+ unsigned int socket_id);
+uint16_t lio_dev_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t budget);
+void lio_delete_droq_queue(struct lio_device *lio_dev, int oq_no);
+
+void lio_delete_sglist(struct lio_instr_queue *txq);
+int lio_setup_sglists(struct lio_device *lio_dev, int iq_no,
+ int fw_mapped_iq, int num_descs, unsigned int socket_id);
+uint16_t lio_dev_xmit_pkts(void *tx_queue, struct rte_mbuf **pkts,
+ uint16_t nb_pkts);
+int lio_wait_for_instr_fetch(struct lio_device *lio_dev);
+int lio_setup_iq(struct lio_device *lio_dev, int q_index,
+ union octeon_txpciq iq_no, uint32_t num_descs, void *app_ctx,
+ unsigned int socket_id);
+int lio_flush_iq(struct lio_device *lio_dev, struct lio_instr_queue *iq);
+void lio_delete_instruction_queue(struct lio_device *lio_dev, int iq_no);
+/** Setup instruction queue zero for the device
+ * @param lio_dev which lio device to setup
+ *
+ * @return 0 on success, -1 on failure
+ */
+int lio_setup_instr_queue0(struct lio_device *lio_dev);
+void lio_free_instr_queue0(struct lio_device *lio_dev);
+void lio_dev_clear_queues(struct rte_eth_dev *eth_dev);
+#endif /* _LIO_RXTX_H_ */
diff --git a/drivers/net/liquidio/lio_struct.h b/drivers/net/liquidio/lio_struct.h
new file mode 100644
index 00000000..26f803f9
--- /dev/null
+++ b/drivers/net/liquidio/lio_struct.h
@@ -0,0 +1,689 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Cavium, Inc.. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LIO_STRUCT_H_
+#define _LIO_STRUCT_H_
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <rte_spinlock.h>
+#include <rte_atomic.h>
+
+#include "lio_hw_defs.h"
+
+struct lio_stailq_node {
+ STAILQ_ENTRY(lio_stailq_node) entries;
+};
+
+STAILQ_HEAD(lio_stailq_head, lio_stailq_node);
+
+struct lio_version {
+ uint16_t major;
+ uint16_t minor;
+ uint16_t micro;
+ uint16_t reserved;
+};
+
+/** Input Queue statistics. Each input queue maintains a set of these counters. */
+struct lio_iq_stats {
+ uint64_t instr_posted; /**< Instructions posted to this queue. */
+ uint64_t instr_processed; /**< Instructions processed in this queue. */
+ uint64_t instr_dropped; /**< Instructions that could not be processed */
+ uint64_t bytes_sent; /**< Bytes sent through this queue. */
+ uint64_t tx_done; /**< Num of packets sent to network. */
+ uint64_t tx_iq_busy; /**< Num of times this iq was found to be full. */
+ uint64_t tx_dropped; /**< Num of pkts dropped due to xmitpath errors. */
+ uint64_t tx_tot_bytes; /**< Total count of bytes sent to network. */
+};
+
+/** Output Queue statistics. Each output queue maintains a set of these counters. */
+struct lio_droq_stats {
+ /** Number of packets received in this queue. */
+ uint64_t pkts_received;
+
+ /** Bytes received by this queue. */
+ uint64_t bytes_received;
+
+ /** Packets dropped due to no memory available. */
+ uint64_t dropped_nomem;
+
+ /** Packets dropped due to a large number of pkts to process. */
+ uint64_t dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ uint64_t rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ uint64_t rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ uint64_t rx_dropped;
+
+ /** Num of vxlan packets received. */
+ uint64_t rx_vxlan;
+
+ /** Num of failures of lio_recv_buffer_alloc() */
+ uint64_t rx_alloc_failure;
+
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement a
+ * DROQ.
+ */
+struct lio_droq {
+ /** A spinlock to protect access to this ring. */
+ rte_spinlock_t lock;
+
+ uint32_t q_no;
+
+ uint32_t pkt_count;
+
+ struct lio_device *lio_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct lio_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ uint32_t read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ uint32_t write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ uint32_t refill_idx;
+
+ /** Packets pending to be processed */
+ rte_atomic64_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ uint32_t max_count;
+
+ /** The number of descriptors pending refill. */
+ uint32_t refill_count;
+
+ uint32_t refill_threshold;
+
+ /** The 8B aligned info ptrs begin from this address. */
+ struct lio_droq_info *info_list;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct lio_recv_buffer *recv_buf_list;
+
+ /** The size of each buffer pointed by the buffer pointer. */
+ uint32_t buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+ * Host writes number of info/buffer ptrs available to this register
+ */
+ void *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void *pkts_sent_reg;
+
+ /** Statistics for this DROQ. */
+ struct lio_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** Info ptr list are allocated at this virtual address. */
+ size_t info_base_addr;
+
+ /** DMA mapped address of the info list */
+ size_t info_list_dma;
+
+ /** Allocated size of info list. */
+ uint32_t info_alloc_size;
+
+ /** Memory zone **/
+ const struct rte_memzone *desc_ring_mz;
+ const struct rte_memzone *info_mz;
+ struct rte_mempool *mpool;
+};
+
+/** Receive Header */
+union octeon_rh {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t rh64;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t reserved : 17;
+ uint64_t ossp : 32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t extra : 28;
+ uint64_t vlan : 12;
+ uint64_t priority : 3;
+ uint64_t csum_verified : 3; /** checksum verified. */
+ uint64_t has_hwtstamp : 1; /** Has hardware timestamp.1 = yes.*/
+ uint64_t encap_on : 1;
+ uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
+ } r_dh;
+ struct {
+ uint64_t opcode : 4;
+ uint64_t subcode : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t reserved : 8;
+ uint64_t extra : 25;
+ uint64_t gmxport : 16;
+ } r_nic_info;
+#else
+ uint64_t rh64;
+ struct {
+ uint64_t ossp : 32; /** opcode/subcode specific parameters */
+ uint64_t reserved : 17;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r;
+ struct {
+ uint64_t has_hash : 1; /** Has hash (rth or rss). 1 = yes. */
+ uint64_t encap_on : 1;
+ uint64_t has_hwtstamp : 1; /** 1 = has hwtstamp */
+ uint64_t csum_verified : 3; /** checksum verified. */
+ uint64_t priority : 3;
+ uint64_t vlan : 12;
+ uint64_t extra : 28;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r_dh;
+ struct {
+ uint64_t gmxport : 16;
+ uint64_t extra : 25;
+ uint64_t reserved : 8;
+ uint64_t len : 3; /** additional 64-bit words */
+ uint64_t subcode : 8;
+ uint64_t opcode : 4;
+ } r_nic_info;
+#endif
+};
+
+#define OCTEON_RH_SIZE (sizeof(union octeon_rh))
+
+/** The txpciq info passed to host from the firmware */
+union octeon_txpciq {
+ uint64_t txpciq64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t q_no : 8;
+ uint64_t port : 8;
+ uint64_t pkind : 6;
+ uint64_t use_qpg : 1;
+ uint64_t qpg : 11;
+ uint64_t aura_num : 10;
+ uint64_t reserved : 20;
+#else
+ uint64_t reserved : 20;
+ uint64_t aura_num : 10;
+ uint64_t qpg : 11;
+ uint64_t use_qpg : 1;
+ uint64_t pkind : 6;
+ uint64_t port : 8;
+ uint64_t q_no : 8;
+#endif
+ } s;
+};
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue for
+ * a LIO device has one such structure to represent it.
+ */
+struct lio_instr_queue {
+ /** A spinlock to protect access to the input ring. */
+ rte_spinlock_t lock;
+
+ rte_spinlock_t post_lock;
+
+ struct lio_device *lio_dev;
+
+ uint32_t pkt_in_done;
+
+ rte_atomic64_t iq_flush_running;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ uint32_t iqcmd_64B:1;
+
+ /** Queue info. */
+ union octeon_txpciq txpciq;
+
+ uint32_t rsvd:17;
+
+ uint32_t status:8;
+
+ /** Maximum no. of instructions in this queue. */
+ uint32_t max_count;
+
+ /** Index in input ring where the driver should write the next packet */
+ uint32_t host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ uint32_t lio_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ uint32_t flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ rte_atomic64_t instr_pending;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ uint8_t *base_addr;
+
+ struct lio_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ uint32_t fill_cnt;
+
+ /** Statistics for this input queue. */
+ struct lio_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ uint64_t base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /* Memory zone */
+ const struct rte_memzone *iq_mz;
+};
+
+/** This structure is used by the driver to store information required
+ * to free the mbuf when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
+ */
+struct lio_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio_device *lio_dev;
+
+ /** Bytes 9-16. Pointer to mbuf. */
+ struct rte_mbuf *mbuf;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct lio_gather *g;
+
+ /** Bytes 25-32. Physical address of mbuf->data or gather list. */
+ uint64_t dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct lio_soft_command *sc;
+
+ /** Bytes 48-63. iq no */
+ uint64_t iq_no;
+};
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * input instruction has this format.
+ */
+struct lio_sg_entry {
+ /** The first 64 bit gives the size of data in each dptr. */
+ union {
+ uint16_t size[4];
+ uint64_t size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ uint64_t ptr[4];
+};
+
+#define LIO_SG_ENTRY_SIZE (sizeof(struct lio_sg_entry))
+
+/** Structure of a node in list of gather components maintained by
+ * driver for each network device.
+ */
+struct lio_gather {
+ /** List manipulation. Next and prev pointers. */
+ struct lio_stailq_node list;
+
+ /** Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /** Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /** Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct lio_sg_entry *sg;
+};
+
+struct lio_rss_ctx {
+ uint16_t hash_key_size;
+ uint8_t hash_key[LIO_RSS_MAX_KEY_SZ];
+ /* Ideally a factor of number of queues */
+ uint8_t itable[LIO_RSS_MAX_TABLE_SZ];
+ uint8_t itable_size;
+ uint8_t ip;
+ uint8_t tcp_hash;
+ uint8_t ipv6;
+ uint8_t ipv6_tcp_hash;
+ uint8_t ipv6_ex;
+ uint8_t ipv6_tcp_ex_hash;
+ uint8_t hash_disable;
+};
+
+struct lio_io_enable {
+ uint64_t iq;
+ uint64_t oq;
+ uint64_t iq64B;
+};
+
+struct lio_fn_list {
+ void (*setup_iq_regs)(struct lio_device *, uint32_t);
+ void (*setup_oq_regs)(struct lio_device *, uint32_t);
+
+ int (*setup_mbox)(struct lio_device *);
+ void (*free_mbox)(struct lio_device *);
+
+ int (*setup_device_regs)(struct lio_device *);
+ int (*enable_io_queues)(struct lio_device *);
+ void (*disable_io_queues)(struct lio_device *);
+};
+
+struct lio_pf_vf_hs_word {
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /** PKIND value assigned for the DPI interface */
+ uint64_t pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ uint64_t core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ uint64_t coproc_tics_per_us : 16;
+
+ /** app that is currently running on OCTEON */
+ uint64_t app_mode : 8;
+
+ /** RESERVED */
+ uint64_t reserved : 16;
+
+#elif RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+
+ /** RESERVED */
+ uint64_t reserved : 16;
+
+ /** app that is currently running on OCTEON */
+ uint64_t app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ uint64_t coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ uint64_t core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ uint64_t pkind : 8;
+#endif
+};
+
+struct lio_sriov_info {
+ /** Number of rings assigned to VF */
+ uint32_t rings_per_vf;
+
+ /** Number of VF devices enabled */
+ uint32_t num_vfs;
+};
+
+/* Head of a response list */
+struct lio_response_list {
+ /** List structure to add delete pending entries to */
+ struct lio_stailq_head head;
+
+ /** A lock for this response list */
+ rte_spinlock_t lock;
+
+ rte_atomic64_t pending_req_count;
+};
+
+/* Structure to define the configuration attributes for each Input queue. */
+struct lio_iq_config {
+ /* Max number of IQs available */
+ uint8_t max_iqs;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ uint32_t pending_list_size;
+
+ /** Command size - 32 or 64 bytes */
+ uint32_t instr_type;
+};
+
+/* Structure to define the configuration attributes for each Output queue. */
+struct lio_oq_config {
+ /* Max number of OQs available */
+ uint8_t max_oqs;
+
+ /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ uint32_t info_ptr;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ uint32_t refill_threshold;
+};
+
+/* Structure to define the configuration. */
+struct lio_config {
+ uint16_t card_type;
+ const char *card_name;
+
+ /** Input Queue attributes. */
+ struct lio_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct lio_oq_config oq;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+};
+
+/** Status of an RGMII link on Octeon as seen by the core driver. */
+union octeon_link_status {
+ uint64_t link_status64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t duplex : 8;
+ uint64_t mtu : 16;
+ uint64_t speed : 16;
+ uint64_t link_up : 1;
+ uint64_t autoneg : 1;
+ uint64_t if_mode : 5;
+ uint64_t pause : 1;
+ uint64_t flashing : 1;
+ uint64_t reserved : 15;
+#else
+ uint64_t reserved : 15;
+ uint64_t flashing : 1;
+ uint64_t pause : 1;
+ uint64_t if_mode : 5;
+ uint64_t autoneg : 1;
+ uint64_t link_up : 1;
+ uint64_t speed : 16;
+ uint64_t mtu : 16;
+ uint64_t duplex : 8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+union octeon_rxpciq {
+ uint64_t rxpciq64;
+
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t q_no : 8;
+ uint64_t reserved : 56;
+#else
+ uint64_t reserved : 56;
+ uint64_t q_no : 8;
+#endif
+ } s;
+};
+
+/** Information for an OCTEON ethernet interface shared between core & host. */
+struct octeon_link_info {
+ union octeon_link_status link;
+ uint64_t hw_addr;
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint64_t gmxport : 16;
+ uint64_t macaddr_is_admin_assigned : 1;
+ uint64_t vlan_is_admin_assigned : 1;
+ uint64_t rsvd : 30;
+ uint64_t num_txpciq : 8;
+ uint64_t num_rxpciq : 8;
+#else
+ uint64_t num_rxpciq : 8;
+ uint64_t num_txpciq : 8;
+ uint64_t rsvd : 30;
+ uint64_t vlan_is_admin_assigned : 1;
+ uint64_t macaddr_is_admin_assigned : 1;
+ uint64_t gmxport : 16;
+#endif
+
+ union octeon_txpciq txpciq[LIO_MAX_IOQS_PER_IF];
+ union octeon_rxpciq rxpciq[LIO_MAX_IOQS_PER_IF];
+};
+
+/* ----------------------- THE LIO DEVICE --------------------------- */
+/** The lio device.
+ * Each lio device has this structure to represent all its
+ * components.
+ */
+struct lio_device {
+ /** PCI device pointer */
+ struct rte_pci_device *pci_dev;
+
+ /** Octeon Chip type */
+ uint16_t chip_id;
+ uint16_t pf_num;
+ uint16_t vf_num;
+
+ /** This device's PCIe port used for traffic. */
+ uint16_t pcie_port;
+
+ /** The state of this device */
+ rte_atomic64_t status;
+
+ uint8_t intf_open;
+
+ struct octeon_link_info linfo;
+
+ uint8_t *hw_addr;
+
+ struct lio_fn_list fn_list;
+
+ uint32_t num_iqs;
+
+ /** Guards each glist */
+ rte_spinlock_t *glist_lock;
+ /** Array of gather component linked lists */
+ struct lio_stailq_head *glist_head;
+
+ /* The pool containing pre-allocated buffers used for soft commands */
+ struct rte_mempool *sc_buf_pool;
+
+ /** The input instruction queues */
+ struct lio_instr_queue *instr_queue[LIO_MAX_POSSIBLE_INSTR_QUEUES];
+
+ /** The singly-linked tail queues of instruction response */
+ struct lio_response_list response_list;
+
+ uint32_t num_oqs;
+
+ /** The DROQ output queues */
+ struct lio_droq *droq[LIO_MAX_POSSIBLE_OUTPUT_QUEUES];
+
+ struct lio_io_enable io_qmask;
+
+ struct lio_sriov_info sriov_info;
+
+ struct lio_pf_vf_hs_word pfvf_hsword;
+
+ /** Mail Box details of each lio queue. */
+ struct lio_mbox **mbox;
+
+ char dev_string[LIO_DEVICE_NAME_LEN]; /* Device print string */
+
+ const struct lio_config *default_config;
+
+ struct rte_eth_dev *eth_dev;
+
+ uint64_t ifflags;
+ uint8_t max_rx_queues;
+ uint8_t max_tx_queues;
+ uint8_t nb_rx_queues;
+ uint8_t nb_tx_queues;
+ uint8_t port_configured;
+ struct lio_rss_ctx rss_state;
+ uint8_t port_id;
+};
+#endif /* _LIO_STRUCT_H_ */
diff --git a/drivers/net/liquidio/rte_pmd_lio_version.map b/drivers/net/liquidio/rte_pmd_lio_version.map
new file mode 100644
index 00000000..8591cc0b
--- /dev/null
+++ b/drivers/net/liquidio/rte_pmd_lio_version.map
@@ -0,0 +1,4 @@
+DPDK_17.05 {
+
+ local: *;
+};
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index efed953e..e873fb48 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -36,12 +36,7 @@ LIB = librte_pmd_mlx4.a
# Sources.
SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4.c
-
-# Dependencies.
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/librte_mempool
+SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_flow.c
# Basic CFLAGS.
CFLAGS += -O3
@@ -102,7 +97,7 @@ endif
mlx4_autoconf.h.new: FORCE
-mlx4_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
+mlx4_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
RSS_SUPPORT \
@@ -129,7 +124,7 @@ mlx4_autoconf.h: mlx4_autoconf.h.new
cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
mv '$<' '$@'
-mlx4.o: mlx4_autoconf.h
+$(SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD):.c=.o): mlx4_autoconf.h
clean_mlx4: FORCE
$Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 6d43a977..ec4419a8 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright 2012-2015 6WIND S.A.
- * Copyright 2012 Mellanox.
+ * Copyright 2012-2017 6WIND S.A.
+ * Copyright 2012-2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -58,22 +58,9 @@
#include <linux/sockios.h>
#include <fcntl.h>
-/* Verbs header. */
-/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
-#include <infiniband/verbs.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
-
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
@@ -86,30 +73,15 @@
#include <rte_log.h>
#include <rte_alarm.h>
#include <rte_memory.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
+#include <rte_flow.h>
+#include <rte_kvargs.h>
/* Generated configuration header. */
#include "mlx4_autoconf.h"
-/* PMD header. */
+/* PMD headers. */
#include "mlx4.h"
-
-/* Runtime logging through RTE_LOG() is enabled when not in debugging mode.
- * Intermediate LOG_*() macros add the required end-of-line characters. */
-#ifndef NDEBUG
-#define INFO(...) DEBUG(__VA_ARGS__)
-#define WARN(...) DEBUG(__VA_ARGS__)
-#define ERROR(...) DEBUG(__VA_ARGS__)
-#else
-#define LOG__(level, m, ...) \
- RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
-#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
-#define INFO(...) LOG_(INFO, __VA_ARGS__)
-#define WARN(...) LOG_(WARNING, __VA_ARGS__)
-#define ERROR(...) LOG_(ERR, __VA_ARGS__)
-#endif
+#include "mlx4_flow.h"
/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
@@ -137,157 +109,6 @@ typedef union {
(((val) & (from)) / ((from) / (to))) : \
(((val) & (from)) * ((to) / (from))))
-struct mlx4_rxq_stats {
- unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
- uint64_t ipackets; /**< Total of successfully received packets. */
- uint64_t ibytes; /**< Total of successfully received bytes. */
-#endif
- uint64_t idropped; /**< Total of packets dropped when RX ring full. */
- uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
-};
-
-struct mlx4_txq_stats {
- unsigned int idx; /**< Mapping index. */
-#ifdef MLX4_PMD_SOFT_COUNTERS
- uint64_t opackets; /**< Total of successfully sent packets. */
- uint64_t obytes; /**< Total of successfully sent bytes. */
-#endif
- uint64_t odropped; /**< Total of packets not sent when TX ring full. */
-};
-
-/* RX element (scattered packets). */
-struct rxq_elt_sp {
- struct ibv_recv_wr wr; /* Work Request. */
- struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
- struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
-};
-
-/* RX element. */
-struct rxq_elt {
- struct ibv_recv_wr wr; /* Work Request. */
- struct ibv_sge sge; /* Scatter/Gather Element. */
- /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
-};
-
-/* RX queue descriptor. */
-struct rxq {
- struct priv *priv; /* Back pointer to private data. */
- struct rte_mempool *mp; /* Memory Pool for allocations. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
- /*
- * Each VLAN ID requires a separate flow steering rule.
- */
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
- struct ibv_flow *promisc_flow; /* Promiscuous flow. */
- struct ibv_flow *allmulti_flow; /* Multicast flow. */
- unsigned int port_id; /* Port ID for incoming packets. */
- unsigned int elts_n; /* (*elts)[] length. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- union {
- struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
- struct rxq_elt (*no_sp)[]; /* RX elements. */
- } elts;
- unsigned int sp:1; /* Use scattered RX elements. */
- unsigned int csum:1; /* Enable checksum offloading. */
- unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
- struct mlx4_rxq_stats stats; /* RX queue counters. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
-};
-
-/* TX element. */
-struct txq_elt {
- struct rte_mbuf *buf;
-};
-
-/* Linear buffer type. It is used when transmitting buffers with too many
- * segments that do not fit the hardware queue (see max_send_sge).
- * Extra segments are copied (linearized) in such buffers, replacing the
- * last SGE during TX.
- * The size is arbitrary but large enough to hold a jumbo frame with
- * 8 segments considering mbuf.buf_len is about 2048 bytes. */
-typedef uint8_t linear_t[16384];
-
-/* TX queue descriptor. */
-struct txq {
- struct priv *priv; /* Back pointer to private data. */
- struct {
- const struct rte_mempool *mp; /* Cached Memory Pool. */
- struct ibv_mr *mr; /* Memory Region (for mp). */
- uint32_t lkey; /* mr->lkey */
- } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
- struct ibv_cq *cq; /* Completion Queue. */
- struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#if MLX4_PMD_MAX_INLINE > 0
- uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
-#endif
- unsigned int elts_n; /* (*elts)[] length. */
- struct txq_elt (*elts)[]; /* TX elements. */
- unsigned int elts_head; /* Current index in (*elts)[]. */
- unsigned int elts_tail; /* First element awaiting completion. */
- unsigned int elts_comp; /* Number of completion requests. */
- unsigned int elts_comp_cd; /* Countdown for next completion request. */
- unsigned int elts_comp_cd_init; /* Initial value for countdown. */
- struct mlx4_txq_stats stats; /* TX queue counters. */
- linear_t (*elts_linear)[]; /* Linearized buffers. */
- struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
- unsigned int socket; /* CPU socket ID for allocations. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
-};
-
-struct priv {
- struct rte_eth_dev *dev; /* Ethernet device. */
- struct ibv_context *ctx; /* Verbs context. */
- struct ibv_device_attr device_attr; /* Device properties. */
- struct ibv_pd *pd; /* Protection Domain. */
- /*
- * MAC addresses array and configuration bit-field.
- * An extra entry that cannot be modified by the DPDK is reserved
- * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
- */
- struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
- BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
- /* VLAN filters. */
- struct {
- unsigned int enabled:1; /* If enabled. */
- unsigned int id:12; /* VLAN ID (0-4095). */
- } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
- /* Device properties. */
- uint16_t mtu; /* Configured MTU. */
- uint8_t port; /* Physical port number. */
- unsigned int started:1; /* Device started, flows enabled. */
- unsigned int promisc:1; /* Device in promiscuous mode. */
- unsigned int allmulti:1; /* Device receives all multicast packets. */
- unsigned int hw_qpg:1; /* QP groups are supported. */
- unsigned int hw_tss:1; /* TSS is supported. */
- unsigned int hw_rss:1; /* RSS is supported. */
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
- unsigned int rss:1; /* RSS is enabled. */
- unsigned int vf:1; /* This is a VF device. */
- unsigned int pending_alarm:1; /* An alarm is pending. */
-#ifdef INLINE_RECV
- unsigned int inl_recv_size; /* Inline recv size */
-#endif
- unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
- /* RX/TX queues. */
- struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
- unsigned int rxqs_n; /* RX queues array size. */
- unsigned int txqs_n; /* TX queues array size. */
- struct rxq *(*rxqs)[]; /* RX queues. */
- struct txq *(*txqs)[]; /* TX queues. */
- struct rte_intr_handle intr_handle; /* Interrupt handler. */
- rte_spinlock_t lock; /* Lock for control functions. */
-};
-
/* Local storage for secondary process data. */
struct mlx4_secondary_data {
struct rte_eth_dev_data data; /* Local device data. */
@@ -296,6 +117,16 @@ struct mlx4_secondary_data {
rte_spinlock_t lock; /* Port configuration lock. */
} mlx4_secondary_data[RTE_MAX_ETHPORTS];
+struct mlx4_conf {
+ uint8_t active_ports;
+};
+
+/* Available parameters list. */
+const char *pmd_mlx4_init_params[] = {
+ MLX4_PMD_PORT_KVARG,
+ NULL,
+};
+
/**
* Check if running as a secondary process.
*
@@ -335,8 +166,7 @@ mlx4_get_priv(struct rte_eth_dev *dev)
* @param priv
* Pointer to private structure.
*/
-static void
-priv_lock(struct priv *priv)
+void priv_lock(struct priv *priv)
{
rte_spinlock_lock(&priv->lock);
}
@@ -347,8 +177,7 @@ priv_lock(struct priv *priv)
* @param priv
* Pointer to private structure.
*/
-static void
-priv_unlock(struct priv *priv)
+void priv_unlock(struct priv *priv)
{
rte_spinlock_unlock(&priv->lock);
}
@@ -2526,6 +2355,7 @@ rxq_add_flow(struct rxq *rxq, unsigned int mac_index, unsigned int vlan_index)
assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
*attr = (struct ibv_flow_attr){
.type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 3,
.num_of_specs = 1,
.port = priv->port,
.flags = 0
@@ -3340,6 +3170,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
/* Increase out of memory counters. */
++rxq->stats.rx_nombuf;
++rxq->priv->dev->data->rx_mbuf_alloc_failed;
+ /* Add SGE to array for repost. */
+ sges[i] = elt->sge;
goto repost;
}
@@ -3604,7 +3436,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
}
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
+ if (dev->data->dev_conf.rxmode.enable_scatter &&
(dev->data->dev_conf.rxmode.max_rx_pkt_len >
(mb_len - RTE_PKTMBUF_HEADROOM))) {
tmpl.sp = 1;
@@ -3826,11 +3658,19 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (mb_len - RTE_PKTMBUF_HEADROOM))) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ tmpl.sp = 0;
+ } else if (dev->data->dev_conf.rxmode.enable_scatter) {
tmpl.sp = 1;
desc /= MLX4_PMD_SGE_WR_N;
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
}
DEBUG("%p: %s scattered packets support (%u WRs)",
(void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc);
@@ -4092,9 +3932,15 @@ mlx4_rx_queue_release(void *dpdk_rxq)
priv_unlock(priv);
}
-static void
+static int
priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+static int
+priv_dev_removal_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+
+static int
+priv_dev_link_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
+
/**
* DPDK callback to start the device.
*
@@ -4113,6 +3959,7 @@ mlx4_dev_start(struct rte_eth_dev *dev)
unsigned int i = 0;
unsigned int r;
struct rxq *rxq;
+ int ret;
if (mlx4_is_secondary())
return -E_RTE_SECONDARY;
@@ -4132,8 +3979,6 @@ mlx4_dev_start(struct rte_eth_dev *dev)
}
/* Iterate only once when RSS is enabled. */
do {
- int ret;
-
/* Ignore nonexistent RX queues. */
if (rxq == NULL)
continue;
@@ -4146,22 +3991,41 @@ mlx4_dev_start(struct rte_eth_dev *dev)
continue;
WARN("%p: QP flow attachment failed: %s",
(void *)dev, strerror(ret));
- /* Rollback. */
- while (i != 0) {
- rxq = (*priv->rxqs)[--i];
- if (rxq != NULL) {
- rxq_allmulticast_disable(rxq);
- rxq_promiscuous_disable(rxq);
- rxq_mac_addrs_del(rxq);
- }
- }
- priv->started = 0;
- priv_unlock(priv);
- return -ret;
+ goto err;
} while ((--r) && ((rxq = (*priv->rxqs)[++i]), i));
- priv_dev_interrupt_handler_install(priv, dev);
+ ret = priv_dev_link_interrupt_handler_install(priv, dev);
+ if (ret) {
+ ERROR("%p: LSC handler install failed",
+ (void *)dev);
+ goto err;
+ }
+ ret = priv_dev_removal_interrupt_handler_install(priv, dev);
+ if (ret) {
+ ERROR("%p: RMV handler install failed",
+ (void *)dev);
+ goto err;
+ }
+ ret = mlx4_priv_flow_start(priv);
+ if (ret) {
+ ERROR("%p: flow start failed: %s",
+ (void *)dev, strerror(ret));
+ goto err;
+ }
priv_unlock(priv);
return 0;
+err:
+ /* Rollback. */
+ while (i != 0) {
+ rxq = (*priv->rxqs)[i--];
+ if (rxq != NULL) {
+ rxq_allmulticast_disable(rxq);
+ rxq_promiscuous_disable(rxq);
+ rxq_mac_addrs_del(rxq);
+ }
+ }
+ priv->started = 0;
+ priv_unlock(priv);
+ return -ret;
}
/**
@@ -4196,6 +4060,7 @@ mlx4_dev_stop(struct rte_eth_dev *dev)
rxq = (*priv->rxqs)[0];
r = priv->rxqs_n;
}
+ mlx4_priv_flow_stop(priv);
/* Iterate only once when RSS is enabled. */
do {
/* Ignore nonexistent RX queues. */
@@ -4258,9 +4123,16 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
return 0;
}
-static void
+static int
priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
+static int
+priv_dev_removal_interrupt_handler_uninstall(struct priv *,
+ struct rte_eth_dev *);
+
+static int
+priv_dev_link_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
+
/**
* DPDK callback to close the device.
*
@@ -4323,7 +4195,8 @@ mlx4_dev_close(struct rte_eth_dev *dev)
claim_zero(ibv_close_device(priv->ctx));
} else
assert(priv->ctx == NULL);
- priv_dev_interrupt_handler_uninstall(priv, dev);
+ priv_dev_removal_interrupt_handler_uninstall(priv, dev);
+ priv_dev_link_interrupt_handler_uninstall(priv, dev);
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -4427,6 +4300,8 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
if (priv == NULL)
return;
priv_lock(priv);
@@ -4628,26 +4503,30 @@ end:
* @param vmdq
* VMDq pool index to associate address with (ignored).
*/
-static void
+static int
mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq)
{
struct priv *priv = dev->data->dev_private;
+ int re;
if (mlx4_is_secondary())
- return;
+ return -ENOTSUP;
(void)vmdq;
priv_lock(priv);
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
/* Last array entry is reserved for broadcast. */
- if (index >= (elemof(priv->mac) - 1))
+ if (index >= (elemof(priv->mac) - 1)) {
+ re = EINVAL;
goto end;
- priv_mac_addr_add(priv, index,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac_addr->addr_bytes);
+ }
+ re = priv_mac_addr_add(priv, index,
+ (const uint8_t (*)[ETHER_ADDR_LEN])
+ mac_addr->addr_bytes);
end:
priv_unlock(priv);
+ return -re;
}
/**
@@ -4827,7 +4706,7 @@ end:
}
/**
- * DPDK callback to retrieve physical link information (unlocked version).
+ * DPDK callback to retrieve physical link information.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -4835,9 +4714,9 @@ end:
* Wait for request completion (ignored).
*/
static int
-mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
+mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
- struct priv *priv = mlx4_get_priv(dev);
+ const struct priv *priv = mlx4_get_priv(dev);
struct ethtool_cmd edata = {
.cmd = ETHTOOL_GSET
};
@@ -4845,6 +4724,8 @@ mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
struct rte_eth_link dev_link;
int link_speed = 0;
+ /* priv_lock() is not taken to allow concurrent calls. */
+
if (priv == NULL)
return -EINVAL;
(void)wait_to_complete;
@@ -4879,27 +4760,9 @@ mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
return -1;
}
-/**
- * DPDK callback to retrieve physical link information.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
static int
-mlx4_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct priv *priv = mlx4_get_priv(dev);
- int ret;
-
- if (priv == NULL)
- return -EINVAL;
- priv_lock(priv);
- ret = mlx4_link_update_unlocked(dev, wait_to_complete);
- priv_unlock(priv);
- return ret;
-}
+mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
+ struct rte_pci_addr *pci_addr);
/**
* DPDK callback to change the MTU.
@@ -4949,21 +4812,16 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
/* Reconfigure each RX queue. */
for (i = 0; (i != priv->rxqs_n); ++i) {
struct rxq *rxq = (*priv->rxqs)[i];
- unsigned int mb_len;
unsigned int max_frame_len;
- int sp;
if (rxq == NULL)
continue;
- /* Calculate new maximum frame length according to MTU and
- * toggle scattered support (sp) if necessary. */
+ /* Calculate new maximum frame length according to MTU. */
max_frame_len = (priv->mtu + ETHER_HDR_LEN +
(ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN));
- mb_len = rte_pktmbuf_data_room_size(rxq->mp);
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
/* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame = sp;
+ dev->data->dev_conf.rxmode.jumbo_frame =
+ (max_frame_len > ETHER_MAX_LEN);
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
ret = rxq_rehash(dev, rxq);
if (ret) {
@@ -5215,6 +5073,55 @@ mlx4_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return -ret;
}
+const struct rte_flow_ops mlx4_flow_ops = {
+ .validate = mlx4_flow_validate,
+ .create = mlx4_flow_create,
+ .destroy = mlx4_flow_destroy,
+ .flush = mlx4_flow_flush,
+ .query = NULL,
+};
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = EINVAL;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx4_flow_ops;
+ return 0;
+ case RTE_ETH_FILTER_FDIR:
+ DEBUG("%p: filter type FDIR is not supported by this PMD",
+ (void *)dev);
+ break;
+ default:
+ ERROR("%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ break;
+ }
+ return -ret;
+}
+
static const struct eth_dev_ops mlx4_dev_ops = {
.dev_configure = mlx4_dev_configure,
.dev_start = mlx4_dev_start,
@@ -5249,6 +5156,7 @@ static const struct eth_dev_ops mlx4_dev_ops = {
.mac_addr_add = mlx4_mac_addr_add,
.mac_addr_set = mlx4_mac_addr_set,
.mtu_set = mlx4_dev_set_mtu,
+ .filter_ctrl = mlx4_dev_filter_ctrl,
};
/**
@@ -5379,35 +5287,44 @@ mlx4_getenv_int(const char *name)
static void
mlx4_dev_link_status_handler(void *);
static void
-mlx4_dev_interrupt_handler(struct rte_intr_handle *, void *);
+mlx4_dev_interrupt_handler(void *);
/**
- * Link status handler.
+ * Link/device status handler.
*
* @param priv
* Pointer to private structure.
* @param dev
* Pointer to the rte_eth_dev structure.
+ * @param events
+ * Pointer to event flags holder.
*
* @return
- * Nonzero if the callback process can be called immediately.
+ * Number of events
*/
static int
-priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
+priv_dev_status_handler(struct priv *priv, struct rte_eth_dev *dev,
+ uint32_t *events)
{
struct ibv_async_event event;
int port_change = 0;
int ret = 0;
+ *events = 0;
/* Read all message and acknowledge them. */
for (;;) {
if (ibv_get_async_event(priv->ctx, &event))
break;
-
- if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
- event.event_type == IBV_EVENT_PORT_ERR)
+ if ((event.event_type == IBV_EVENT_PORT_ACTIVE ||
+ event.event_type == IBV_EVENT_PORT_ERR) &&
+ (priv->intr_conf.lsc == 1)) {
port_change = 1;
- else
+ ret++;
+ } else if (event.event_type == IBV_EVENT_DEVICE_FATAL &&
+ priv->intr_conf.rmv == 1) {
+ *events |= (1 << RTE_ETH_EVENT_INTR_RMV);
+ ret++;
+ } else
DEBUG("event type %d on port %d not handled",
event.event_type, event.element.port_num);
ibv_ack_async_event(&event);
@@ -5417,7 +5334,7 @@ priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
struct rte_eth_link *link = &dev->data->dev_link;
priv->pending_alarm = 0;
- mlx4_link_update_unlocked(dev, 0);
+ mlx4_link_update(dev, 0);
if (((link->link_speed == 0) && link->link_status) ||
((link->link_speed != 0) && !link->link_status)) {
/* Inconsistent status, check again later. */
@@ -5425,8 +5342,9 @@ priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
rte_eal_alarm_set(MLX4_ALARM_TIMEOUT_US,
mlx4_dev_link_status_handler,
dev);
- } else
- ret = 1;
+ } else {
+ *events |= (1 << RTE_ETH_EVENT_INTR_LSC);
+ }
}
return ret;
}
@@ -5442,13 +5360,14 @@ mlx4_dev_link_status_handler(void *arg)
{
struct rte_eth_dev *dev = arg;
struct priv *priv = dev->data->dev_private;
+ uint32_t events;
int ret;
priv_lock(priv);
assert(priv->pending_alarm == 1);
- ret = priv_dev_link_status_handler(priv, dev);
+ ret = priv_dev_status_handler(priv, dev, &events);
priv_unlock(priv);
- if (ret)
+ if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
@@ -5461,18 +5380,31 @@ mlx4_dev_link_status_handler(void *arg)
* Callback argument.
*/
static void
-mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+mlx4_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
int ret;
+ uint32_t ev;
+ int i;
- (void)intr_handle;
priv_lock(priv);
- ret = priv_dev_link_status_handler(priv, dev);
+ ret = priv_dev_status_handler(priv, dev, &ev);
priv_unlock(priv);
- if (ret)
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ if (ret > 0) {
+ for (i = RTE_ETH_EVENT_UNKNOWN;
+ i < RTE_ETH_EVENT_MAX;
+ i++) {
+ if (ev & (1 << i)) {
+ ev &= ~(1 << i);
+ _rte_eth_dev_callback_process(dev, i, NULL);
+ ret--;
+ }
+ }
+ if (ret)
+ WARN("%d event%s not processed", ret,
+ (ret > 1 ? "s were" : " was"));
+ }
}
/**
@@ -5482,20 +5414,30 @@ mlx4_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
* Pointer to private structure.
* @param dev
* Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative errno value on failure.
*/
-static void
+static int
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
- if (!dev->data->dev_conf.intr_conf.lsc)
- return;
- rte_intr_callback_unregister(&priv->intr_handle,
- mlx4_dev_interrupt_handler,
- dev);
- if (priv->pending_alarm)
- rte_eal_alarm_cancel(mlx4_dev_link_status_handler, dev);
- priv->pending_alarm = 0;
+ int ret;
+
+ if (priv->intr_conf.lsc ||
+ priv->intr_conf.rmv)
+ return 0;
+ ret = rte_intr_callback_unregister(&priv->intr_handle,
+ mlx4_dev_interrupt_handler,
+ dev);
+ if (ret < 0) {
+ ERROR("rte_intr_callback_unregister failed with %d"
+ "%s%s%s", ret,
+ (errno ? " (errno: " : ""),
+ (errno ? strerror(errno) : ""),
+ (errno ? ")" : ""));
+ }
priv->intr_handle.fd = 0;
priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+ return ret;
}
/**
@@ -5505,30 +5447,229 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
* Pointer to private structure.
* @param dev
* Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative errno value on failure.
*/
-static void
-priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
+static int
+priv_dev_interrupt_handler_install(struct priv *priv,
+ struct rte_eth_dev *dev)
{
- int rc, flags;
+ int flags;
+ int rc;
- if (!dev->data->dev_conf.intr_conf.lsc)
- return;
+ /* Check whether the interrupt handler has already been installed
+ * for either type of interrupt
+ */
+ if (priv->intr_conf.lsc &&
+ priv->intr_conf.rmv &&
+ priv->intr_handle.fd)
+ return 0;
assert(priv->ctx->async_fd > 0);
flags = fcntl(priv->ctx->async_fd, F_GETFL);
rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
if (rc < 0) {
INFO("failed to change file descriptor async event queue");
dev->data->dev_conf.intr_conf.lsc = 0;
+ dev->data->dev_conf.intr_conf.rmv = 0;
+ return -errno;
} else {
priv->intr_handle.fd = priv->ctx->async_fd;
priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
- rte_intr_callback_register(&priv->intr_handle,
- mlx4_dev_interrupt_handler,
- dev);
+ rc = rte_intr_callback_register(&priv->intr_handle,
+ mlx4_dev_interrupt_handler,
+ dev);
+ if (rc) {
+ ERROR("rte_intr_callback_register failed "
+ " (errno: %s)", strerror(errno));
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error.
+ */
+static int
+priv_dev_removal_interrupt_handler_uninstall(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ if (dev->data->dev_conf.intr_conf.rmv) {
+ priv->intr_conf.rmv = 0;
+ return priv_dev_interrupt_handler_uninstall(priv, dev);
+ }
+ return 0;
+}
+
+/**
+ * Uninstall interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error,
+ */
+static int
+priv_dev_link_interrupt_handler_uninstall(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ int ret = 0;
+
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ priv->intr_conf.lsc = 0;
+ ret = priv_dev_interrupt_handler_uninstall(priv, dev);
+ if (ret)
+ return ret;
+ }
+ if (priv->pending_alarm)
+ if (rte_eal_alarm_cancel(mlx4_dev_link_status_handler,
+ dev)) {
+ ERROR("rte_eal_alarm_cancel failed "
+ " (errno: %s)", strerror(rte_errno));
+ return -rte_errno;
+ }
+ priv->pending_alarm = 0;
+ return 0;
+}
+
+/**
+ * Install link interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error.
+ */
+static int
+priv_dev_link_interrupt_handler_install(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.lsc) {
+ ret = priv_dev_interrupt_handler_install(priv, dev);
+ if (ret)
+ return ret;
+ priv->intr_conf.lsc = 1;
+ }
+ return 0;
+}
+
+/**
+ * Install removal interrupt handler.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param dev
+ * Pointer to the rte_eth_dev structure.
+ * @return
+ * 0 on success, negative value on error.
+ */
+static int
+priv_dev_removal_interrupt_handler_install(struct priv *priv,
+ struct rte_eth_dev *dev)
+{
+ int ret;
+
+ if (dev->data->dev_conf.intr_conf.rmv) {
+ ret = priv_dev_interrupt_handler_install(priv, dev);
+ if (ret)
+ return ret;
+ priv->intr_conf.rmv = 1;
+ }
+ return 0;
+}
+
+/**
+ * Verify and store value for device argument.
+ *
+ * @param[in] key
+ * Key argument to verify.
+ * @param[in] val
+ * Value associated with key.
+ * @param out
+ * User data.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_arg_parse(const char *key, const char *val, void *out)
+{
+ struct mlx4_conf *conf = out;
+ unsigned long tmp;
+
+ errno = 0;
+ tmp = strtoul(val, NULL, 0);
+ if (errno) {
+ WARN("%s: \"%s\" is not a valid integer", key, val);
+ return -errno;
+ }
+ if (strcmp(MLX4_PMD_PORT_KVARG, key) == 0) {
+ if (tmp >= MLX4_PMD_MAX_PHYS_PORTS) {
+ ERROR("invalid port index %lu (max: %u)",
+ tmp, MLX4_PMD_MAX_PHYS_PORTS - 1);
+ return -EINVAL;
+ }
+ conf->active_ports |= 1 << tmp;
+ } else {
+ WARN("%s: unknown parameter", key);
+ return -EINVAL;
}
+ return 0;
}
-static struct eth_driver mlx4_driver;
+/**
+ * Parse device parameters.
+ *
+ * @param devargs
+ * Device arguments structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+static int
+mlx4_args(struct rte_devargs *devargs, struct mlx4_conf *conf)
+{
+ struct rte_kvargs *kvlist;
+ unsigned int arg_count;
+ int ret = 0;
+ int i;
+
+ if (devargs == NULL)
+ return 0;
+ kvlist = rte_kvargs_parse(devargs->args, pmd_mlx4_init_params);
+ if (kvlist == NULL) {
+ ERROR("failed to parse kvargs");
+ return -EINVAL;
+ }
+ /* Process parameters. */
+ for (i = 0; pmd_mlx4_init_params[i]; ++i) {
+ arg_count = rte_kvargs_count(kvlist, MLX4_PMD_PORT_KVARG);
+ while (arg_count-- > 0) {
+ ret = rte_kvargs_process(kvlist, MLX4_PMD_PORT_KVARG,
+ mlx4_arg_parse, conf);
+ if (ret != 0)
+ goto free_kvlist;
+ }
+ }
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
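
A stand-alone sketch of the "port" parsing performed by mlx4_arg_parse() above, assuming the same strtoul()-plus-bit-mask approach; the helper is hypothetical:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Each valid index below max_ports sets one bit in *mask; a negative errno
 * value is returned otherwise. */
static int
parse_port_arg(const char *val, unsigned int max_ports, uint32_t *mask)
{
	char *end;
	unsigned long idx;

	errno = 0;
	idx = strtoul(val, &end, 0);
	if (errno || end == val || *end != '\0' || idx >= max_ports)
		return -EINVAL;
	*mask |= UINT32_C(1) << idx;
	return 0;
}

With devargs such as port=0 and port=1 the resulting mask is 0x3; when no "port" parameter is given, mlx4_pci_probe() later enables every physical port.
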
+
+static struct rte_pci_driver mlx4_driver;
/**
* DPDK callback to register a PCI device.
@@ -5552,12 +5693,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
int err = 0;
struct ibv_context *attr_ctx = NULL;
struct ibv_device_attr device_attr;
+ struct mlx4_conf conf = {
+ .active_ports = 0,
+ };
unsigned int vf;
int idx;
int i;
(void)pci_drv;
- assert(pci_drv == &mlx4_driver.pci_drv);
+ assert(pci_drv == &mlx4_driver);
/* Get mlx4_dev[] index. */
idx = mlx4_dev_idx(&pci_dev->addr);
if (idx == -1) {
@@ -5571,10 +5715,8 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
- if (errno == ENOSYS) {
- WARN("cannot list devices, is ib_uverbs loaded?");
- return 0;
- }
+ if (errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
}
assert(i >= 0);
@@ -5606,11 +5748,11 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
- WARN("cannot access device, is mlx4_ib loaded?");
- return 0;
+ ERROR("cannot access device, is mlx4_ib loaded?");
+ return -ENODEV;
case EINVAL:
- WARN("cannot use device, are drivers up to date?");
- return 0;
+ ERROR("cannot use device, are drivers up to date?");
+ return -EINVAL;
}
assert(err > 0);
return -err;
@@ -5622,6 +5764,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
goto error;
INFO("%u port(s) detected", device_attr.phys_port_cnt);
+ if (mlx4_args(pci_dev->device.devargs, &conf)) {
+ ERROR("failed to process device arguments");
+ goto error;
+ }
+ /* Use all ports when none are defined */
+ if (conf.active_ports == 0) {
+ for (i = 0; i < MLX4_PMD_MAX_PHYS_PORTS; i++)
+ conf.active_ports |= 1 << i;
+ }
for (i = 0; i < device_attr.phys_port_cnt; i++) {
uint32_t port = i + 1; /* ports are indexed from one */
uint32_t test = (1 << i);
@@ -5635,6 +5786,9 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
#endif /* HAVE_EXP_QUERY_DEVICE */
struct ether_addr mac;
+ /* If port is not active, skip. */
+ if (!(conf.active_ports & (1 << i)))
+ continue;
#ifdef HAVE_EXP_QUERY_DEVICE
exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS;
#ifdef RSS_SUPPORT
@@ -5840,23 +5994,23 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->rx_pkt_burst = mlx4_rx_burst_secondary_setup;
} else {
eth_dev->data->dev_private = priv;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
- eth_dev->data->mtu = ETHER_MTU;
eth_dev->data->mac_addrs = priv->mac;
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->driver = &mlx4_driver;
+ eth_dev->device->driver = &mlx4_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx4_dev_ops;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
+ /* Update link status once if waiting for LSC. */
+ if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ mlx4_link_update(eth_dev, 0);
continue;
port_error:
@@ -5910,16 +6064,14 @@ static const struct rte_pci_id mlx4_pci_id_map[] = {
}
};
-static struct eth_driver mlx4_driver = {
- .pci_drv = {
- .driver = {
- .name = MLX4_DRIVER_NAME
- },
- .id_table = mlx4_pci_id_map,
- .probe = mlx4_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC,
+static struct rte_pci_driver mlx4_driver = {
+ .driver = {
+ .name = MLX4_DRIVER_NAME
},
- .dev_private_size = sizeof(struct priv)
+ .id_table = mlx4_pci_id_map,
+ .probe = mlx4_pci_probe,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_INTR_RMV,
};
/**
@@ -5938,8 +6090,10 @@ rte_mlx4_pmd_init(void)
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
ibv_fork_init();
- rte_eal_pci_register(&mlx4_driver.pci_drv);
+ rte_pci_register(&mlx4_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx4, mlx4_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_mlx4,
+ "* ib_uverbs & mlx4_en & mlx4_core & mlx4_ib");
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 4c7505e2..9a3bae90 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright 2012-2015 6WIND S.A.
- * Copyright 2012 Mellanox.
+ * Copyright 2012-2017 6WIND S.A.
+ * Copyright 2012-2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,6 +39,33 @@
#include <limits.h>
/*
+ * Runtime logging through RTE_LOG() is enabled when not in debugging mode.
+ * Intermediate LOG_*() macros add the required end-of-line characters.
+ */
+#ifndef NDEBUG
+#define INFO(...) DEBUG(__VA_ARGS__)
+#define WARN(...) DEBUG(__VA_ARGS__)
+#define ERROR(...) DEBUG(__VA_ARGS__)
+#else
+#define LOG__(level, m, ...) \
+ RTE_LOG(level, PMD, MLX4_DRIVER_NAME ": " m "%c", __VA_ARGS__)
+#define LOG_(level, ...) LOG__(level, __VA_ARGS__, '\n')
+#define INFO(...) LOG_(INFO, __VA_ARGS__)
+#define WARN(...) LOG_(WARNING, __VA_ARGS__)
+#define ERROR(...) LOG_(ERR, __VA_ARGS__)
+#endif
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+/*
* Maximum number of simultaneous MAC addresses supported.
*
* According to ConnectX's Programmer Reference Manual:
@@ -54,6 +81,9 @@
/* Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64
+/* Maximum number of physical ports. */
+#define MLX4_PMD_MAX_PHYS_PORTS 2
+
/* Maximum number of Scatter/Gather Elements per Work Request. */
#ifndef MLX4_PMD_SGE_WR_N
#define MLX4_PMD_SGE_WR_N 4
@@ -86,6 +116,9 @@
/* Alarm timeout. */
#define MLX4_ALARM_TIMEOUT_US 100000
+/* Port parameter. */
+#define MLX4_PMD_PORT_KVARG "port"
+
enum {
PCI_VENDOR_ID_MELLANOX = 0x15b3,
};
@@ -160,4 +193,165 @@ enum {
#define claim_positive(...) (__VA_ARGS__)
#endif /* NDEBUG */
+struct mlx4_rxq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ uint64_t ipackets; /**< Total of successfully received packets. */
+ uint64_t ibytes; /**< Total of successfully received bytes. */
+#endif
+ uint64_t idropped; /**< Total of packets dropped when RX ring full. */
+ uint64_t rx_nombuf; /**< Total of RX mbuf allocation failures. */
+};
+
+/* RX element (scattered packets). */
+struct rxq_elt_sp {
+ struct ibv_recv_wr wr; /* Work Request. */
+ struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
+ struct rte_mbuf *bufs[MLX4_PMD_SGE_WR_N]; /* SGEs buffers. */
+};
+
+/* RX element. */
+struct rxq_elt {
+ struct ibv_recv_wr wr; /* Work Request. */
+ struct ibv_sge sge; /* Scatter/Gather Element. */
+ /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
+};
+
+/* RX queue descriptor. */
+struct rxq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct rte_mempool *mp; /* Memory Pool for allocations. */
+ struct ibv_mr *mr; /* Memory Region (for mp). */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+ /*
+ * Each VLAN ID requires a separate flow steering rule.
+ */
+ BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
+ struct ibv_flow *mac_flow[MLX4_MAX_MAC_ADDRESSES][MLX4_MAX_VLAN_IDS];
+ struct ibv_flow *promisc_flow; /* Promiscuous flow. */
+ struct ibv_flow *allmulti_flow; /* Multicast flow. */
+ unsigned int port_id; /* Port ID for incoming packets. */
+ unsigned int elts_n; /* (*elts)[] length. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
+ union {
+ struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */
+ struct rxq_elt (*no_sp)[]; /* RX elements. */
+ } elts;
+ unsigned int sp:1; /* Use scattered RX elements. */
+ unsigned int csum:1; /* Enable checksum offloading. */
+ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
+ struct mlx4_rxq_stats stats; /* RX queue counters. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+/* TX element. */
+struct txq_elt {
+ struct rte_mbuf *buf;
+};
+
+struct mlx4_txq_stats {
+ unsigned int idx; /**< Mapping index. */
+#ifdef MLX4_PMD_SOFT_COUNTERS
+ uint64_t opackets; /**< Total of successfully sent packets. */
+ uint64_t obytes; /**< Total of successfully sent bytes. */
+#endif
+ uint64_t odropped; /**< Total of packets not sent when TX ring full. */
+};
+
+/*
+ * Linear buffer type. It is used when transmitting buffers with too many
+ * segments that do not fit the hardware queue (see max_send_sge).
+ * Extra segments are copied (linearized) in such buffers, replacing the
+ * last SGE during TX.
+ * The size is arbitrary but large enough to hold a jumbo frame with
+ * 8 segments considering mbuf.buf_len is about 2048 bytes.
+ */
+typedef uint8_t linear_t[16384];
+
+/* TX queue descriptor. */
+struct txq {
+ struct priv *priv; /* Back pointer to private data. */
+ struct {
+ const struct rte_mempool *mp; /* Cached Memory Pool. */
+ struct ibv_mr *mr; /* Memory Region (for mp). */
+ uint32_t lkey; /* mr->lkey */
+ } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
+ struct ibv_cq *cq; /* Completion Queue. */
+ struct ibv_qp *qp; /* Queue Pair. */
+ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#if MLX4_PMD_MAX_INLINE > 0
+ uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
+#endif
+ unsigned int elts_n; /* (*elts)[] length. */
+ struct txq_elt (*elts)[]; /* TX elements. */
+ unsigned int elts_head; /* Current index in (*elts)[]. */
+ unsigned int elts_tail; /* First element awaiting completion. */
+ unsigned int elts_comp; /* Number of completion requests. */
+ unsigned int elts_comp_cd; /* Countdown for next completion request. */
+ unsigned int elts_comp_cd_init; /* Initial value for countdown. */
+ struct mlx4_txq_stats stats; /* TX queue counters. */
+ linear_t (*elts_linear)[]; /* Linearized buffers. */
+ struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
+ unsigned int socket; /* CPU socket ID for allocations. */
+ struct ibv_exp_res_domain *rd; /* Resource Domain. */
+};
+
+struct rte_flow;
+
+struct priv {
+ struct rte_eth_dev *dev; /* Ethernet device. */
+ struct ibv_context *ctx; /* Verbs context. */
+ struct ibv_device_attr device_attr; /* Device properties. */
+ struct ibv_pd *pd; /* Protection Domain. */
+ /*
+ * MAC addresses array and configuration bit-field.
+ * An extra entry that cannot be modified by the DPDK is reserved
+ * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
+ */
+ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+ BITFIELD_DECLARE(mac_configured, uint32_t, MLX4_MAX_MAC_ADDRESSES);
+ /* VLAN filters. */
+ struct {
+ unsigned int enabled:1; /* If enabled. */
+ unsigned int id:12; /* VLAN ID (0-4095). */
+ } vlan_filter[MLX4_MAX_VLAN_IDS]; /* VLAN filters table. */
+ /* Device properties. */
+ uint16_t mtu; /* Configured MTU. */
+ uint8_t port; /* Physical port number. */
+ unsigned int started:1; /* Device started, flows enabled. */
+ unsigned int promisc:1; /* Device in promiscuous mode. */
+ unsigned int allmulti:1; /* Device receives all multicast packets. */
+ unsigned int hw_qpg:1; /* QP groups are supported. */
+ unsigned int hw_tss:1; /* TSS is supported. */
+ unsigned int hw_rss:1; /* RSS is supported. */
+ unsigned int hw_csum:1; /* Checksum offload is supported. */
+ unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
+ unsigned int rss:1; /* RSS is enabled. */
+ unsigned int vf:1; /* This is a VF device. */
+ unsigned int pending_alarm:1; /* An alarm is pending. */
+#ifdef INLINE_RECV
+ unsigned int inl_recv_size; /* Inline recv size */
+#endif
+ unsigned int max_rss_tbl_sz; /* Maximum number of RSS queues. */
+ /* RX/TX queues. */
+ struct rxq rxq_parent; /* Parent queue when RSS is enabled. */
+ unsigned int rxqs_n; /* RX queues array size. */
+ unsigned int txqs_n; /* TX queues array size. */
+ struct rxq *(*rxqs)[]; /* RX queues. */
+ struct txq *(*txqs)[]; /* TX queues. */
+ struct rte_intr_handle intr_handle; /* Interrupt handler. */
+ struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
+ LIST_HEAD(mlx4_flows, rte_flow) flows;
+ struct rte_intr_conf intr_conf; /* Active interrupt configuration. */
+ rte_spinlock_t lock; /* Lock for control functions. */
+};
+
+void priv_lock(struct priv *priv);
+void priv_unlock(struct priv *priv);
+
#endif /* RTE_PMD_MLX4_H_ */
diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c
new file mode 100644
index 00000000..edfac038
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_flow.c
@@ -0,0 +1,1090 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
+/* Generated configuration header. */
+#include "mlx4_autoconf.h"
+
+/* PMD headers. */
+#include "mlx4.h"
+#include "mlx4_flow.h"
+
+/** Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
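
ITEMS() expands to an anonymous, END-terminated compound literal, which is how the .items successor lists of the graph further below can be walked. An illustration; the identifiers are placeholders:

/* Illustration only: a node's list of allowed successors and a lookup. */
static const enum rte_flow_item_type *const example_next =
	ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4);

static int
example_is_allowed(enum rte_flow_item_type type)
{
	unsigned int i;

	for (i = 0; example_next[i] != RTE_FLOW_ITEM_TYPE_END; ++i)
		if (example_next[i] == type)
			return 1;
	return 0;
}
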
+
+/** Structure to generate a simple graph of layers supported by the NIC. */
+struct mlx4_flow_items {
+ /** List of possible actions for these items. */
+ const enum rte_flow_action_type *const actions;
+ /** Bit-masks corresponding to the possibilities for the item. */
+ const void *mask;
+ /**
+ * Default bit-masks to use when item->mask is not provided. When
+ * \default_mask is also NULL, the full supported bit-mask (\mask) is
+ * used instead.
+ */
+ const void *default_mask;
+ /** Bit-masks size in bytes. */
+ const unsigned int mask_sz;
+ /**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param mask[in]
+ * Bit-masks covering supported fields to compare with spec,
+ * last and mask in
+ * \item.
+ * @param size
+ * Bit-Mask size in bytes.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*validate)(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size);
+ /**
+ * Conversion function from rte_flow to NIC specific flow.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param default_mask
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+ /** Size in bytes of the destination structure. */
+ const unsigned int dst_sz;
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+struct rte_flow_drop {
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+ struct ibv_cq *cq; /**< Verbs completion queue. */
+};
+
+/** Valid action for this PMD. */
+static const enum rte_flow_action_type valid_actions[] = {
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_eth(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+ unsigned int i;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 2;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *eth = (struct ibv_flow_spec_eth) {
+ .type = IBV_FLOW_SPEC_ETH,
+ .size = eth_size,
+ };
+ if (!spec) {
+ flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
+ return 0;
+ }
+ if (!mask)
+ mask = default_mask;
+ memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+ eth->val.src_mac[i] &= eth->mask.src_mac[i];
+ }
+ return 0;
+}
+
+/**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_vlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_flow_spec_eth);
+
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_ipv4(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_ipv4 *ipv4;
+ unsigned int ipv4_size = sizeof(struct ibv_flow_spec_ipv4);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 1;
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *ipv4 = (struct ibv_flow_spec_ipv4) {
+ .type = IBV_FLOW_SPEC_IPV4,
+ .size = ipv4_size,
+ };
+ if (!spec)
+ return 0;
+ ipv4->val = (struct ibv_flow_ipv4_filter) {
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ };
+ if (!mask)
+ mask = default_mask;
+ ipv4->mask = (struct ibv_flow_ipv4_filter) {
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4->val.src_ip &= ipv4->mask.src_ip;
+ ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+ return 0;
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_udp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_tcp_udp *udp;
+ unsigned int udp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *udp = (struct ibv_flow_spec_tcp_udp) {
+ .type = IBV_FLOW_SPEC_UDP,
+ .size = udp_size,
+ };
+ if (!spec)
+ return 0;
+ udp->val.dst_port = spec->hdr.dst_port;
+ udp->val.src_port = spec->hdr.src_port;
+ if (!mask)
+ mask = default_mask;
+ udp->mask.dst_port = mask->hdr.dst_port;
+ udp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp->val.src_port &= udp->mask.src_port;
+ udp->val.dst_port &= udp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx4_flow_create_tcp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct mlx4_flow *flow = (struct mlx4_flow *)data;
+ struct ibv_flow_spec_tcp_udp *tcp;
+ unsigned int tcp_size = sizeof(struct ibv_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *tcp = (struct ibv_flow_spec_tcp_udp) {
+ .type = IBV_FLOW_SPEC_TCP,
+ .size = tcp_size,
+ };
+ if (!spec)
+ return 0;
+ tcp->val.dst_port = spec->hdr.dst_port;
+ tcp->val.src_port = spec->hdr.src_port;
+ if (!mask)
+ mask = default_mask;
+ tcp->mask.dst_port = mask->hdr.dst_port;
+ tcp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp->val.src_port &= tcp->mask.src_port;
+ tcp->val.dst_port &= tcp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param mask[in]
+ * Bit-masks covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param size
+ * Bit-Mask size in bytes.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+static int
+mlx4_flow_item_validate(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ int ret = 0;
+
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
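
Because the masked spec and last buffers are compared with memcmp(), any range in which they still differ after masking is refused. A hedged illustration calling the function above; the wrapper and its values are arbitrary:

/* Illustration only: a UDP destination-port range is rejected, since only
 * exact matches (masked spec == masked last) pass this validation. */
static int
example_udp_range_is_rejected(void)
{
	struct rte_flow_item_udp spec = { .hdr.dst_port = rte_cpu_to_be_16(1000) };
	struct rte_flow_item_udp last = { .hdr.dst_port = rte_cpu_to_be_16(2000) };
	struct rte_flow_item_udp mask = { .hdr.dst_port = 0xffff };
	struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = &spec,
		.last = &last,
	};

	return mlx4_flow_item_validate(&item, (const uint8_t *)&mask,
				       sizeof(mask)) != 0;
}
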
+
+static int
+mlx4_flow_validate_eth(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_eth *mask = item->mask;
+
+ if (mask->dst.addr_bytes[0] != 0xff ||
+ mask->dst.addr_bytes[1] != 0xff ||
+ mask->dst.addr_bytes[2] != 0xff ||
+ mask->dst.addr_bytes[3] != 0xff ||
+ mask->dst.addr_bytes[4] != 0xff ||
+ mask->dst.addr_bytes[5] != 0xff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_vlan(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_vlan *mask = item->mask;
+
+ if (mask->tci != 0 &&
+ ntohs(mask->tci) != 0x0fff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_ipv4(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+
+ if (mask->hdr.src_addr != 0 &&
+ mask->hdr.src_addr != 0xffffffff)
+ return -1;
+ if (mask->hdr.dst_addr != 0 &&
+ mask->hdr.dst_addr != 0xffffffff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_udp(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_udp *mask = item->mask;
+
+ if (mask->hdr.src_port != 0 &&
+ mask->hdr.src_port != 0xffff)
+ return -1;
+ if (mask->hdr.dst_port != 0 &&
+ mask->hdr.dst_port != 0xffff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+static int
+mlx4_flow_validate_tcp(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ if (item->mask) {
+ const struct rte_flow_item_tcp *mask = item->mask;
+
+ if (mask->hdr.src_port != 0 &&
+ mask->hdr.src_port != 0xffff)
+ return -1;
+ if (mask->hdr.dst_port != 0 &&
+ mask->hdr.dst_port != 0xffff)
+ return -1;
+ }
+ return mlx4_flow_item_validate(item, mask, size);
+}
+
+/** Graph of supported items and associated actions. */
+static const struct mlx4_flow_items mlx4_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .default_mask = &rte_flow_item_eth_mask,
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .validate = mlx4_flow_validate_eth,
+ .convert = mlx4_flow_create_eth,
+ .dst_sz = sizeof(struct ibv_flow_spec_eth),
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vlan){
+ /* rte_flow_item_vlan_mask is invalid for mlx4. */
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ .tci = 0x0fff,
+#else
+ .tci = 0xff0f,
+#endif
+ },
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .validate = mlx4_flow_validate_vlan,
+ .convert = mlx4_flow_create_vlan,
+ .dst_sz = 0,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .validate = mlx4_flow_validate_ipv4,
+ .convert = mlx4_flow_create_ipv4,
+ .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_udp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .validate = mlx4_flow_validate_udp,
+ .convert = mlx4_flow_create_udp,
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_tcp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .validate = mlx4_flow_validate_tcp,
+ .convert = mlx4_flow_create_tcp,
+ .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
+ },
+};
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx4_flow *flow)
+{
+ const struct mlx4_flow_items *cur_item = mlx4_flow_items;
+ struct mlx4_flow_action action = {
+ .queue = 0,
+ .drop = 0,
+ };
+
+ (void)priv;
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+ /* Go over items list. */
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx4_flow_items *token = NULL;
+ unsigned int i;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ /*
+ * The NIC can support patterns with NULL eth spec only
+ * if eth is a single item in a rule.
+ */
+ if (!items->spec &&
+ items->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ const struct rte_flow_item *next = items + 1;
+
+ if (next->type != RTE_FLOW_ITEM_TYPE_END) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "the rule requires"
+ " an Ethernet spec");
+ return -rte_errno;
+ }
+ }
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx4_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = cur_item->validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow->ibv_attr && cur_item->convert) {
+ err = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ flow);
+ if (err)
+ goto exit_item_not_supported;
+ }
+ flow->offset += cur_item->dst_sz;
+ }
+ /* Go over actions list */
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ action.drop = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+
+ if (!queue || (queue->index > (priv->rxqs_n - 1)))
+ goto exit_action_not_supported;
+ action.queue = 1;
+ } else {
+ goto exit_action_not_supported;
+ }
+ }
+ if (!action.queue && !action.drop) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no valid action");
+ return -rte_errno;
+ }
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx4_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr) };
+
+ priv_lock(priv);
+ ret = priv_flow_validate(priv, attr, items, actions, error, &flow);
+ priv_unlock(priv);
+ return ret;
+}
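
From the application side, the entry point above is reached through the generic rte_flow API. A hedged sketch of a rule this validator accepts (ingress, Ethernet destination match, steer to queue 0); port_id, the MAC address and the assumption that at least one RX queue is configured are arbitrary:

#include <rte_flow.h>

static int
example_validate_rule(uint8_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.dst.addr_bytes = "\x00\x0b\x0c\x0d\x0e\x0f",
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}
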
+
+/**
+ * Destroy a drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+mlx4_flow_destroy_drop_queue(struct priv *priv)
+{
+ if (priv->flow_drop_queue) {
+ struct rte_flow_drop *fdq = priv->flow_drop_queue;
+
+ priv->flow_drop_queue = NULL;
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ }
+}
+
+/**
+ * Create a single drop queue for all drop flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+static int
+mlx4_flow_create_drop_queue(struct priv *priv)
+{
+ struct ibv_qp *qp;
+ struct ibv_cq *cq;
+ struct rte_flow_drop *fdq;
+
+ fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+ if (!fdq) {
+ ERROR("Cannot allocate memory for drop struct");
+ goto err;
+ }
+ cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+ &(struct ibv_exp_cq_init_attr){
+ .comp_mask = 0,
+ });
+ if (!cq) {
+ ERROR("Cannot create drop CQ");
+ goto err_create_cq;
+ }
+ qp = ibv_exp_create_qp(priv->ctx,
+ &(struct ibv_exp_qp_init_attr){
+ .send_cq = cq,
+ .recv_cq = cq,
+ .cap = {
+ .max_recv_wr = 1,
+ .max_recv_sge = 1,
+ },
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_PORT,
+ .pd = priv->pd,
+ .port_num = priv->port,
+ });
+ if (!qp) {
+ ERROR("Cannot create drop QP");
+ goto err_create_qp;
+ }
+ *fdq = (struct rte_flow_drop){
+ .qp = qp,
+ .cq = cq,
+ };
+ priv->flow_drop_queue = fdq;
+ return 0;
+err_create_qp:
+ claim_zero(ibv_destroy_cq(cq));
+err_create_cq:
+ rte_free(fdq);
+err:
+ return -1;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param ibv_attr
+ * Verbs flow attributes.
+ * @param action
+ * Target action structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+ struct ibv_flow_attr *ibv_attr,
+ struct mlx4_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct ibv_qp *qp;
+ struct rte_flow *rte_flow;
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ if (action->drop) {
+ qp = priv->flow_drop_queue->qp;
+ } else {
+ struct rxq *rxq = (*priv->rxqs)[action->queue_id];
+
+ qp = rxq->qp;
+ rte_flow->qp = qp;
+ }
+ rte_flow->ibv_attr = ibv_attr;
+ rte_flow->ibv_flow = ibv_create_flow(qp, rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+
+error:
+ rte_free(rte_flow);
+ return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ struct mlx4_flow_action action;
+ struct mlx4_flow flow = { .offset = sizeof(struct ibv_flow_attr), };
+ int err;
+
+ err = priv_flow_validate(priv, attr, items, actions, error, &flow);
+ if (err)
+ return NULL;
+ flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
+ if (!flow.ibv_attr) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate ibv_attr memory");
+ return NULL;
+ }
+ flow.offset = sizeof(struct ibv_flow_attr);
+ *flow.ibv_attr = (struct ibv_flow_attr){
+ .comp_mask = 0,
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .size = sizeof(struct ibv_flow_attr),
+ .priority = attr->priority,
+ .num_of_specs = 0,
+ .port = priv->port,
+ .flags = 0,
+ };
+ claim_zero(priv_flow_validate(priv, attr, items, actions,
+ error, &flow));
+ action = (struct mlx4_flow_action){
+ .queue = 0,
+ .drop = 0,
+ };
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ action.queue = 1;
+ action.queue_id =
+ ((const struct rte_flow_action_queue *)
+ actions->conf)->index;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ action.drop = 1;
+ } else {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "unsupported action");
+ goto exit;
+ }
+ }
+ rte_flow = priv_flow_create_action_queue(priv, flow.ibv_attr,
+ &action, error);
+ if (rte_flow)
+ return rte_flow;
+exit:
+ rte_free(flow.ibv_attr);
+ return NULL;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx4_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ priv_lock(priv);
+ flow = priv_flow_create(priv, attr, items, actions, error);
+ if (flow) {
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ }
+ priv_unlock(priv);
+ return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] flow
+ * Flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv, struct rte_flow *flow)
+{
+ (void)priv;
+ LIST_REMOVE(flow, next);
+ if (flow->ibv_flow)
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ rte_free(flow->ibv_attr);
+ DEBUG("Flow destroyed %p", (void *)flow);
+ rte_free(flow);
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx4_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_flush(struct priv *priv)
+{
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow;
+
+ flow = LIST_FIRST(&priv->flows);
+ priv_flow_destroy(priv, flow);
+ }
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx4_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+mlx4_priv_flow_stop(struct priv *priv)
+{
+ struct rte_flow *flow;
+
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ DEBUG("Flow %p removed", (void *)flow);
+ }
+ mlx4_flow_destroy_drop_queue(priv);
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, an errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_priv_flow_start(struct priv *priv)
+{
+ int ret;
+ struct ibv_qp *qp;
+ struct rte_flow *flow;
+
+ ret = mlx4_flow_create_drop_queue(priv);
+ if (ret)
+ return -1;
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ qp = flow->qp ? flow->qp : priv->flow_drop_queue->qp;
+ flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
+ if (!flow->ibv_flow) {
+ DEBUG("Flow %p cannot be applied", (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ }
+ return 0;
+}
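
These two helpers are meant to be driven from the port start/stop path: flows are detached from their Verbs QPs on stop and re-attached (with the drop queue re-created) on start. A hedged sketch of that wiring; the callback names and error handling are placeholders:

/* Illustration only: how dev_start/dev_stop callbacks could drive the two
 * helpers above; the rest of the start/stop logic is omitted. */
static int
example_dev_start(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	ret = mlx4_priv_flow_start(priv);
	priv_unlock(priv);
	return ret ? -1 : 0;
}

static void
example_dev_stop(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;

	priv_lock(priv);
	mlx4_priv_flow_stop(priv);
	priv_unlock(priv);
}
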
diff --git a/drivers/net/mlx4/mlx4_flow.h b/drivers/net/mlx4/mlx4_flow.h
new file mode 100644
index 00000000..12a293e4
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_flow.h
@@ -0,0 +1,102 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RTE_PMD_MLX4_FLOW_H_
+#define RTE_PMD_MLX4_FLOW_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_byteorder.h>
+
+#include "mlx4.h"
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct ibv_flow *ibv_flow; /**< Verbs flow. */
+ struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+};
+
+int
+mlx4_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+struct rte_flow *
+mlx4_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+int
+mlx4_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+int
+mlx4_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error);
+
+/** Structure to pass to the conversion function. */
+struct mlx4_flow {
+ struct ibv_flow_attr *ibv_attr; /**< Verbs attribute. */
+ unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+};
+
+struct mlx4_flow_action {
+ uint32_t drop:1; /**< Target is a drop queue. */
+ uint32_t queue:1; /**< Target is a receive queue. */
+ uint32_t queue_id; /**< Identifier of the queue. */
+};
+
+int mlx4_priv_flow_start(struct priv *priv);
+void mlx4_priv_flow_stop(struct priv *priv);
+
+#endif /* RTE_PMD_MLX4_FLOW_H_ */
diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
index cf87f0b1..c0799591 100644
--- a/drivers/net/mlx5/Makefile
+++ b/drivers/net/mlx5/Makefile
@@ -48,13 +48,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c
SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c
-
-# Dependencies.
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_kvargs
+SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c
# Basic CFLAGS.
CFLAGS += -O3
@@ -104,7 +98,7 @@ endif
mlx5_autoconf.h.new: FORCE
-mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
+mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
$Q $(RM) -f -- '$@'
$Q sh -- '$<' '$@' \
HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \
@@ -136,6 +130,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh
/usr/include/linux/ethtool.h \
enum ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_UPDATE_CQ_CI \
+ infiniband/mlx5_hw.h \
+ func ibv_mlx5_exp_update_cq_ci \
+ $(AUTOCONF_OUTPUT)
# Create mlx5_autoconf.h or update it in case it differs from the new one.
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index cb45fd0f..fc99c0d5 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -56,6 +56,7 @@
#endif
#include <rte_malloc.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_common.h>
#include <rte_kvargs.h>
@@ -84,6 +85,27 @@
/* Device parameter to enable multi-packet send WQEs. */
#define MLX5_TXQ_MPW_EN "txq_mpw_en"
+/* Device parameter to include 2 dsegs in the title WQEBB. */
+#define MLX5_TXQ_MPW_HDR_DSEG_EN "txq_mpw_hdr_dseg_en"
+
+/* Device parameter to limit the size of inlining packet. */
+#define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
+
+/* Device parameter to enable hardware TSO offload. */
+#define MLX5_TSO "tso"
+
+/* Default PMD specific parameter value. */
+#define MLX5_ARG_UNSET (-1)
+
+struct mlx5_args {
+ int cqe_comp;
+ int txq_inline;
+ int txqs_inline;
+ int mps;
+ int mpw_hdr_dseg;
+ int inline_max_packet_sz;
+ int tso;
+};
/**
* Retrieve integer value from environment variable.
*
@@ -199,6 +221,9 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.link_update = mlx5_link_update,
.stats_get = mlx5_stats_get,
.stats_reset = mlx5_stats_reset,
+ .xstats_get = mlx5_xstats_get,
+ .xstats_reset = mlx5_xstats_reset,
+ .xstats_get_names = mlx5_xstats_get_names,
.dev_infos_get = mlx5_dev_infos_get,
.dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
.vlan_filter_set = mlx5_vlan_filter_set,
@@ -219,6 +244,10 @@ static const struct eth_dev_ops mlx5_dev_ops = {
.rss_hash_update = mlx5_rss_hash_update,
.rss_hash_conf_get = mlx5_rss_hash_conf_get,
.filter_ctrl = mlx5_dev_filter_ctrl,
+ .rx_descriptor_status = mlx5_rx_descriptor_status,
+ .tx_descriptor_status = mlx5_tx_descriptor_status,
+ .rx_queue_intr_enable = mlx5_rx_intr_enable,
+ .rx_queue_intr_disable = mlx5_rx_intr_disable,
};
static struct {
@@ -270,7 +299,7 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr)
static int
mlx5_args_check(const char *key, const char *val, void *opaque)
{
- struct priv *priv = opaque;
+ struct mlx5_args *args = opaque;
unsigned long tmp;
errno = 0;
@@ -280,13 +309,19 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
return errno;
}
if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) {
- priv->cqe_comp = !!tmp;
+ args->cqe_comp = !!tmp;
} else if (strcmp(MLX5_TXQ_INLINE, key) == 0) {
- priv->txq_inline = tmp;
+ args->txq_inline = tmp;
} else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) {
- priv->txqs_inline = tmp;
+ args->txqs_inline = tmp;
} else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) {
- priv->mps = !!tmp;
+ args->mps = !!tmp;
+ } else if (strcmp(MLX5_TXQ_MPW_HDR_DSEG_EN, key) == 0) {
+ args->mpw_hdr_dseg = !!tmp;
+ } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
+ args->inline_max_packet_sz = tmp;
+ } else if (strcmp(MLX5_TSO, key) == 0) {
+ args->tso = !!tmp;
} else {
WARN("%s: unknown parameter", key);
return -EINVAL;
@@ -306,13 +341,16 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
* 0 on success, errno value on failure.
*/
static int
-mlx5_args(struct priv *priv, struct rte_devargs *devargs)
+mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
{
const char **params = (const char *[]){
MLX5_RXQ_CQE_COMP_EN,
MLX5_TXQ_INLINE,
MLX5_TXQS_MIN_INLINE,
MLX5_TXQ_MPW_EN,
+ MLX5_TXQ_MPW_HDR_DSEG_EN,
+ MLX5_TXQ_MAX_INLINE_LEN,
+ MLX5_TSO,
NULL,
};
struct rte_kvargs *kvlist;
@@ -329,7 +367,7 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
for (i = 0; (params[i] != NULL); ++i) {
if (rte_kvargs_count(kvlist, params[i])) {
ret = rte_kvargs_process(kvlist, params[i],
- mlx5_args_check, priv);
+ mlx5_args_check, args);
if (ret != 0) {
rte_kvargs_free(kvlist);
return ret;
@@ -340,7 +378,35 @@ mlx5_args(struct priv *priv, struct rte_devargs *devargs)
return 0;
}
-static struct eth_driver mlx5_driver;
+static struct rte_pci_driver mlx5_driver;
+
+/**
+ * Assign parameters from args into priv; only non-default
+ * values are considered.
+ *
+ * @param[out] priv
+ * Pointer to private structure.
+ * @param[in] args
+ * Pointer to args values.
+ */
+static void
+mlx5_args_assign(struct priv *priv, struct mlx5_args *args)
+{
+ if (args->cqe_comp != MLX5_ARG_UNSET)
+ priv->cqe_comp = args->cqe_comp;
+ if (args->txq_inline != MLX5_ARG_UNSET)
+ priv->txq_inline = args->txq_inline;
+ if (args->txqs_inline != MLX5_ARG_UNSET)
+ priv->txqs_inline = args->txqs_inline;
+ if (args->mps != MLX5_ARG_UNSET)
+ priv->mps = args->mps ? priv->mps : 0;
+ if (args->mpw_hdr_dseg != MLX5_ARG_UNSET)
+ priv->mpw_hdr_dseg = args->mpw_hdr_dseg;
+ if (args->inline_max_packet_sz != MLX5_ARG_UNSET)
+ priv->inline_max_packet_sz = args->inline_max_packet_sz;
+ if (args->tso != MLX5_ARG_UNSET)
+ priv->tso = args->tso;
+}
/**
* DPDK callback to register a PCI device.
@@ -366,11 +432,12 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_device_attr device_attr;
unsigned int sriov;
unsigned int mps;
+ unsigned int tunnel_en;
int idx;
int i;
(void)pci_drv;
- assert(pci_drv == &mlx5_driver.pci_drv);
+ assert(pci_drv == &mlx5_driver);
/* Get mlx5_dev[] index. */
idx = mlx5_dev_idx(&pci_dev->addr);
if (idx == -1) {
@@ -384,10 +451,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
list = ibv_get_device_list(&i);
if (list == NULL) {
assert(errno);
- if (errno == ENOSYS) {
- WARN("cannot list devices, is ib_uverbs loaded?");
- return 0;
- }
+ if (errno == ENOSYS)
+ ERROR("cannot list devices, is ib_uverbs loaded?");
return -errno;
}
assert(i >= 0);
@@ -410,15 +475,39 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
sriov = ((pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) ||
(pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF));
- /* Multi-packet send is only supported by ConnectX-4 Lx PF. */
- mps = (pci_dev->id.device_id ==
- PCI_DEVICE_ID_MELLANOX_CONNECTX4LX);
+ PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) ||
+ (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF) ||
+ (pci_dev->id.device_id ==
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF));
+ /*
+ * Multi-packet send is supported by ConnectX-4 Lx PF as well
+ * as all ConnectX-5 devices.
+ */
+ switch (pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+ tunnel_en = 1;
+ mps = MLX5_MPW_DISABLED;
+ break;
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
+ mps = MLX5_MPW;
+ break;
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
+ tunnel_en = 1;
+ mps = MLX5_MPW_ENHANCED;
+ break;
+ default:
+ mps = MLX5_MPW_DISABLED;
+ }
INFO("PCI information matches, using device \"%s\""
- " (SR-IOV: %s, MPS: %s)",
+ " (SR-IOV: %s, %sMPS: %s)",
list[i]->name,
sriov ? "true" : "false",
- mps ? "true" : "false");
+ mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ mps != MLX5_MPW_DISABLED ? "true" : "false");
attr_ctx = ibv_open_device(list[i]);
err = errno;
break;
@@ -427,11 +516,11 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
ibv_free_device_list(list);
switch (err) {
case 0:
- WARN("cannot access device, is mlx5_ib loaded?");
- return 0;
+ ERROR("cannot access device, is mlx5_ib loaded?");
+ return -ENODEV;
case EINVAL:
- WARN("cannot use device, are drivers up to date?");
- return 0;
+ ERROR("cannot use device, are drivers up to date?");
+ return -EINVAL;
}
assert(err > 0);
return -err;
@@ -454,12 +543,22 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
struct ibv_exp_device_attr exp_device_attr;
struct ether_addr mac;
uint16_t num_vfs = 0;
+ struct mlx5_args args = {
+ .cqe_comp = MLX5_ARG_UNSET,
+ .txq_inline = MLX5_ARG_UNSET,
+ .txqs_inline = MLX5_ARG_UNSET,
+ .mps = MLX5_ARG_UNSET,
+ .mpw_hdr_dseg = MLX5_ARG_UNSET,
+ .inline_max_packet_sz = MLX5_ARG_UNSET,
+ .tso = MLX5_ARG_UNSET,
+ };
exp_device_attr.comp_mask =
IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS |
IBV_EXP_DEVICE_ATTR_RX_HASH |
IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS |
IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN |
+ IBV_EXP_DEVICE_ATTR_TSO_CAPS |
0;
DEBUG("using port %u (%08" PRIx32 ")", port, test);
@@ -513,12 +612,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
- err = mlx5_args(priv, pci_dev->device.devargs);
+ priv->tunnel_en = tunnel_en;
+ err = mlx5_args(&args, pci_dev->device.devargs);
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
goto port_error;
}
+ mlx5_args_assign(priv, &args);
if (ibv_exp_query_device(ctx, &exp_device_attr)) {
ERROR("ibv_exp_query_device() failed");
goto port_error;
@@ -540,8 +641,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size;
/* Remove this check once DPDK supports larger/variable
* indirection tables. */
- if (priv->ind_table_max_size > (unsigned int)RSS_INDIRECTION_TABLE_SIZE)
- priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE;
+ if (priv->ind_table_max_size >
+ (unsigned int)ETH_RSS_RETA_SIZE_512)
+ priv->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
DEBUG("maximum RX indirection table size is %u",
priv->ind_table_max_size);
priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap &
@@ -560,11 +662,36 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv_get_num_vfs(priv, &num_vfs);
priv->sriov = (num_vfs || sriov);
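+ /*
+ * TSO stays enabled only when requested through the device
+ * arguments and the HCA reports TSO support for raw Ethernet
+ * (IBV_QPT_RAW_ETH) QPs.
+ */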
+ priv->tso = ((priv->tso) &&
+ (exp_device_attr.tso_caps.max_tso > 0) &&
+ (exp_device_attr.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_ETH)));
+ if (priv->tso)
+ priv->max_tso_payload_sz =
+ exp_device_attr.tso_caps.max_tso;
if (priv->mps && !mps) {
ERROR("multi-packet send not supported on this device"
" (" MLX5_TXQ_MPW_EN ")");
err = ENOTSUP;
goto port_error;
+ } else if (priv->mps && priv->tso) {
+ WARN("multi-packet send not supported in conjunction "
+ "with TSO. MPS disabled");
+ priv->mps = 0;
+ }
+ INFO("%sMPS is %s",
+ priv->mps == MLX5_MPW_ENHANCED ? "Enhanced " : "",
+ priv->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
+ /* Set default values for Enhanced MPW, a.k.a. MPWv2. */
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ if (args.txqs_inline == MLX5_ARG_UNSET)
+ priv->txqs_inline = MLX5_EMPW_MIN_TXQS;
+ if (args.inline_max_packet_sz == MLX5_ARG_UNSET)
+ priv->inline_max_packet_sz =
+ MLX5_EMPW_MAX_INLINE_LEN;
+ if (args.txq_inline == MLX5_ARG_UNSET)
+ priv->txq_inline = MLX5_WQE_SIZE_MAX -
+ MLX5_WQE_SIZE;
}
/* Allocate and register default RSS hash keys. */
priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n,
@@ -654,23 +781,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
eth_dev->rx_pkt_burst = mlx5_rx_burst_secondary_setup;
} else {
eth_dev->data->dev_private = priv;
- eth_dev->data->rx_mbuf_alloc_failed = 0;
- eth_dev->data->mtu = ETHER_MTU;
eth_dev->data->mac_addrs = priv->mac;
}
- eth_dev->pci_dev = pci_dev;
+ eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
- eth_dev->driver = &mlx5_driver;
+ eth_dev->device->driver = &mlx5_driver.driver;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
-
/* Bring Ethernet device up. */
DEBUG("forcing Ethernet interface up");
priv_set_flags(priv, ~IFF_UP, IFF_UP);
- mlx5_link_update_unlocked(priv->dev, 1);
+ mlx5_link_update(priv->dev, 1);
continue;
port_error:
@@ -725,20 +848,33 @@ static const struct rte_pci_id mlx5_pci_id_map[] = {
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)
},
{
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF)
+ },
+ {
.vendor_id = 0
}
};
-static struct eth_driver mlx5_driver = {
- .pci_drv = {
- .driver = {
- .name = MLX5_DRIVER_NAME
- },
- .id_table = mlx5_pci_id_map,
- .probe = mlx5_pci_probe,
- .drv_flags = RTE_PCI_DRV_INTR_LSC,
+static struct rte_pci_driver mlx5_driver = {
+ .driver = {
+ .name = MLX5_DRIVER_NAME
},
- .dev_private_size = sizeof(struct priv)
+ .id_table = mlx5_pci_id_map,
+ .probe = mlx5_pci_probe,
+ .drv_flags = RTE_PCI_DRV_INTR_LSC,
};
/**
@@ -756,8 +892,9 @@ rte_mlx5_pmd_init(void)
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
ibv_fork_init();
- rte_eal_pci_register(&mlx5_driver.pci_drv);
+ rte_pci_register(&mlx5_driver);
}
RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib");
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 79b7a600..67fd7428 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -59,6 +59,7 @@
#include <rte_spinlock.h>
#include <rte_interrupts.h>
#include <rte_errno.h>
+#include <rte_flow.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -82,6 +83,18 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX4VF = 0x1014,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LX = 0x1015,
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5 = 0x1017,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5VF = 0x1018,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EX = 0x1019,
+ PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
+};
+
+struct mlx5_xstats_ctrl {
+ /* Number of device stats. */
+ uint16_t stats_n;
+ /* Index in the device counters table. */
+ uint16_t dev_table_idx[MLX5_MAX_XSTATS];
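+ /* Counter values recorded at the last xstats reset. */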
+ uint64_t base[MLX5_MAX_XSTATS];
};
struct priv {
@@ -110,11 +123,17 @@ struct priv {
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
unsigned int sriov:1; /* This is a VF or PF with VF devices. */
- unsigned int mps:1; /* Whether multi-packet send is supported. */
+ unsigned int mps:2; /* Multi-packet send mode (0: disabled). */
+ unsigned int mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
unsigned int pending_alarm:1; /* An alarm is pending. */
+ unsigned int tso:1; /* Whether TSO is supported. */
+ unsigned int tunnel_en:1;
+ /* Whether Tx offloads for tunneled packets are supported. */
+ unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
unsigned int txq_inline; /* Maximum packet size for inlining. */
unsigned int txqs_inline; /* Queue number threshold for inlining. */
+ unsigned int inline_max_packet_sz; /* Max packet size for inlining. */
/* RX/TX queues. */
unsigned int rxqs_n; /* RX queues array size. */
unsigned int txqs_n; /* TX queues array size. */
@@ -135,7 +154,10 @@ struct priv {
unsigned int reta_idx_n; /* RETA index size. */
struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
struct fdir_queue *fdir_drop_queue; /* Flow director drop queue. */
+ struct rte_flow_drop *flow_drop_queue; /* Flow drop queue. */
+ LIST_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
uint32_t link_speed_capa; /* Link speed capabilities. */
+ struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
rte_spinlock_t lock; /* Lock for control functions. */
};
@@ -182,13 +204,14 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev);
int mlx5_is_secondary(void);
int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]);
int priv_ifreq(const struct priv *, int req, struct ifreq *);
+int priv_is_ib_cntr(const char *);
+int priv_get_cntr_sysfs(struct priv *, const char *, uint64_t *);
int priv_get_num_vfs(struct priv *, uint16_t *);
int priv_get_mtu(struct priv *, uint16_t *);
int priv_set_flags(struct priv *, unsigned int, unsigned int);
int mlx5_dev_configure(struct rte_eth_dev *);
void mlx5_dev_infos_get(struct rte_eth_dev *, struct rte_eth_dev_info *);
const uint32_t *mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev);
-int mlx5_link_update_unlocked(struct rte_eth_dev *, int);
int mlx5_link_update(struct rte_eth_dev *, int);
int mlx5_dev_set_mtu(struct rte_eth_dev *, uint16_t);
int mlx5_dev_get_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
@@ -196,7 +219,7 @@ int mlx5_dev_set_flow_ctrl(struct rte_eth_dev *, struct rte_eth_fc_conf *);
int mlx5_ibv_device_to_pci_addr(const struct ibv_device *,
struct rte_pci_addr *);
void mlx5_dev_link_status_handler(void *);
-void mlx5_dev_interrupt_handler(struct rte_intr_handle *, void *);
+void mlx5_dev_interrupt_handler(void *);
void priv_dev_interrupt_handler_uninstall(struct priv *, struct rte_eth_dev *);
void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *);
int mlx5_set_link_down(struct rte_eth_dev *dev);
@@ -215,8 +238,8 @@ int hash_rxq_mac_addrs_add(struct hash_rxq *);
int priv_mac_addr_add(struct priv *, unsigned int,
const uint8_t (*)[ETHER_ADDR_LEN]);
int priv_mac_addrs_enable(struct priv *);
-void mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
- uint32_t);
+int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
+ uint32_t);
void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *);
/* mlx5_rss.c */
@@ -244,8 +267,14 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
/* mlx5_stats.c */
+void priv_xstats_init(struct priv *);
void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
void mlx5_stats_reset(struct rte_eth_dev *);
+int mlx5_xstats_get(struct rte_eth_dev *,
+ struct rte_eth_xstat *, unsigned int);
+void mlx5_xstats_reset(struct rte_eth_dev *);
+int mlx5_xstats_get_names(struct rte_eth_dev *,
+ struct rte_eth_xstat_name *, unsigned int);
/* mlx5_vlan.c */
@@ -268,4 +297,22 @@ void priv_fdir_enable(struct priv *);
int mlx5_dev_filter_ctrl(struct rte_eth_dev *, enum rte_filter_type,
enum rte_filter_op, void *);
+/* mlx5_flow.c */
+
+int mlx5_flow_validate(struct rte_eth_dev *, const struct rte_flow_attr *,
+ const struct rte_flow_item [],
+ const struct rte_flow_action [],
+ struct rte_flow_error *);
+struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
+ const struct rte_flow_attr *,
+ const struct rte_flow_item [],
+ const struct rte_flow_action [],
+ struct rte_flow_error *);
+int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
+ struct rte_flow_error *);
+int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
+int priv_flow_start(struct priv *);
+void priv_flow_stop(struct priv *);
+int priv_flow_rxq_in_use(struct priv *, struct rxq *);
+
#endif /* RTE_PMD_MLX5_H_ */
diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h
index b32816e6..201bb336 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -54,8 +54,12 @@
*/
#define MLX5_TX_COMP_THRESH 32
-/* RSS Indirection table size. */
-#define RSS_INDIRECTION_TABLE_SIZE 256
+/*
+ * Request TX completion every time the total number of WQEBBs used for inlining
+ * packets exceeds the size of the WQ divided by this divisor. It should
+ * preferably be a power of two for performance.
+ */
+#define MLX5_TX_COMP_THRESH_INLINE_DIV (1 << 3)
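+/*
+ * For example, with a 512-WQEBB send queue this requests a completion once
+ * inlined packets occupy more than 512 / 8 = 64 WQEBBs.
+ */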
/*
* Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP
@@ -79,4 +83,10 @@
/* Alarm timeout. */
#define MLX5_ALARM_TIMEOUT_US 100000
+/* Maximum number of extended statistics counters. */
+#define MLX5_MAX_XSTATS 32
+
+/* Maximum size of packet headers (L2+L3+L4) for TSO. */
+#define MLX5_MAX_TSO_HEADER 128
+
#endif /* RTE_PMD_MLX5_DEFS_H_ */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 06cfd016..3fd22cb8 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -234,6 +234,23 @@ try_dev_id:
}
/**
+ * Check whether the counter is located in the ib counters file.
+ *
+ * @param[in] cntr
+ * Counter name.
+ *
+ * @return
+ * 1 if the counter is located in the ib counters file, 0 otherwise.
+ */
+int
+priv_is_ib_cntr(const char *cntr)
+{
+ if (!strcmp(cntr, "out_of_buffer"))
+ return 1;
+ return 0;
+}
+
+/**
* Read from sysfs entry.
*
* @param[in] priv
@@ -260,10 +277,15 @@ priv_sysfs_read(const struct priv *priv, const char *entry,
if (priv_get_ifname(priv, &ifname))
return -1;
- MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
- ifname, entry);
-
- file = fopen(path, "rb");
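+ /*
+ * IB counters (currently only "out_of_buffer") are read from the
+ * ports/1/hw_counters directory instead of the netdev statistics
+ * directory.
+ */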
+ if (priv_is_ib_cntr(entry)) {
+ MKSTR(path, "%s/ports/1/hw_counters/%s",
+ priv->ctx->device->ibdev_path, entry);
+ file = fopen(path, "rb");
+ } else {
+ MKSTR(path, "%s/device/net/%s/%s",
+ priv->ctx->device->ibdev_path, ifname, entry);
+ file = fopen(path, "rb");
+ }
if (file == NULL)
return -1;
ret = fread(buf, 1, size, file);
@@ -469,6 +491,30 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu)
}
/**
+ * Read device counter from sysfs.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param name
+ * Counter name.
+ * @param[out] cntr
+ * Counter output buffer.
+ *
+ * @return
+ * 0 on success, -1 on failure and errno is set.
+ */
+int
+priv_get_cntr_sysfs(struct priv *priv, const char *name, uint64_t *cntr)
+{
+ unsigned long ulong_ctr;
+
+ if (priv_get_sysfs_ulong(priv, name, &ulong_ctr) == -1)
+ return -1;
+ *cntr = ulong_ctr;
+ return 0;
+}
+
+/**
* Set device MTU.
*
* @param priv
@@ -615,6 +661,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
unsigned int max;
char ifname[IF_NAMESIZE];
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
priv_lock(priv);
/* FIXME: we should ask the device for these values. */
info->min_rx_bufsize = 32;
@@ -645,14 +693,16 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
(DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM);
+ if (priv->tso)
+ info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ if (priv->tunnel_en)
+ info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO);
if (priv_get_ifname(priv, &ifname) == 0)
info->if_index = if_nametoindex(ifname);
- /* FIXME: RETA update/query API expects the callee to know the size of
- * the indirection table, for this PMD the size varies depending on
- * the number of RX queues, it becomes impossible to find the correct
- * size if it is not fixed.
- * The API should be updated to solve this problem. */
- info->reta_size = priv->ind_table_max_size;
+ info->reta_size = priv->reta_idx_n ?
+ priv->reta_idx_n : priv->ind_table_max_size;
info->hash_key_size = ((*priv->rss_conf) ?
(*priv->rss_conf)[0]->rss_key_len :
0);
@@ -665,10 +715,10 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
static const uint32_t ptypes[] = {
/* refers to rxq_cq_to_pkt_type() */
- RTE_PTYPE_L3_IPV4,
- RTE_PTYPE_L3_IPV6,
- RTE_PTYPE_INNER_L3_IPV4,
- RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
RTE_PTYPE_UNKNOWN
};
@@ -679,7 +729,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
}
/**
- * Retrieve physical link information (unlocked version using legacy ioctl).
+ * DPDK callback to retrieve physical link information.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -697,6 +747,8 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
struct rte_eth_link dev_link;
int link_speed = 0;
+ /* priv_lock() is not taken to allow concurrent calls. */
+
(void)wait_to_complete;
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
@@ -827,7 +879,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * DPDK callback to retrieve physical link information (unlocked version).
+ * DPDK callback to retrieve physical link information.
*
* @param dev
* Pointer to Ethernet device structure.
@@ -835,7 +887,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
* Wait for request completion (ignored).
*/
int
-mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
struct utsname utsname;
int ver[3];
@@ -849,26 +901,6 @@ mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
}
/**
- * DPDK callback to retrieve physical link information.
- *
- * @param dev
- * Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
- */
-int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct priv *priv = mlx5_get_priv(dev);
- int ret;
-
- priv_lock(priv);
- ret = mlx5_link_update_unlocked(dev, wait_to_complete);
- priv_unlock(priv);
- return ret;
-}
-
-/**
* DPDK callback to change the MTU.
*
* Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
@@ -959,7 +991,6 @@ recover:
struct rxq *rxq = (*priv->rxqs)[i];
struct rxq_ctrl *rxq_ctrl =
container_of(rxq, struct rxq_ctrl, rxq);
- int sp;
unsigned int mb_len;
unsigned int tmp;
@@ -967,10 +998,9 @@ recover:
continue;
mb_len = rte_pktmbuf_data_room_size(rxq->mp);
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /* Toggle scattered support (sp) if necessary. */
- sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
/* Provide new values to rxq_setup(). */
- dev->data->dev_conf.rxmode.jumbo_frame = sp;
+ dev->data->dev_conf.rxmode.jumbo_frame =
+ (max_frame_len > ETHER_MAX_LEN);
dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
if (rehash)
ret = rxq_rehash(dev, rxq_ctrl);
@@ -1244,13 +1274,12 @@ mlx5_dev_link_status_handler(void *arg)
* Callback argument.
*/
void
-mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
+mlx5_dev_interrupt_handler(void *cb_arg)
{
struct rte_eth_dev *dev = cb_arg;
struct priv *priv = dev->data->dev_private;
int ret;
- (void)intr_handle;
priv_lock(priv);
ret = priv_dev_link_status_handler(priv, dev);
priv_unlock(priv);
@@ -1553,14 +1582,15 @@ void
priv_select_tx_function(struct priv *priv)
{
priv->dev->tx_pkt_burst = mlx5_tx_burst;
- /* Display warning for unsupported configurations. */
- if (priv->sriov && priv->mps)
- WARN("multi-packet send WQE cannot be used on a SR-IOV setup");
/* Select appropriate TX function. */
- if ((priv->sriov == 0) && priv->mps && priv->txq_inline) {
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ priv->dev->tx_pkt_burst =
+ mlx5_tx_burst_empw;
+ DEBUG("selected Enhanced MPW TX function");
+ } else if (priv->mps && priv->txq_inline) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
DEBUG("selected MPW inline TX function");
- } else if ((priv->sriov == 0) && priv->mps) {
+ } else if (priv->mps) {
priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
DEBUG("selected MPW TX function");
}
diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c
index 1acf6826..f80c58b4 100644
--- a/drivers/net/mlx5/mlx5_fdir.c
+++ b/drivers/net/mlx5/mlx5_fdir.c
@@ -55,6 +55,8 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -1042,6 +1044,14 @@ priv_fdir_ctrl_func(struct priv *priv, enum rte_filter_op filter_op, void *arg)
return ret;
}
+static const struct rte_flow_ops mlx5_flow_ops = {
+ .validate = mlx5_flow_validate,
+ .create = mlx5_flow_create,
+ .destroy = mlx5_flow_destroy,
+ .flush = mlx5_flow_flush,
+ .query = NULL,
+};
+
/**
* Manage filter operations.
*
@@ -1067,6 +1077,11 @@ mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
struct priv *priv = dev->data->dev_private;
switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &mlx5_flow_ops;
+ return 0;
case RTE_ETH_FILTER_FDIR:
priv_lock(priv);
ret = priv_fdir_ctrl_func(priv, filter_op, arg);
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
new file mode 100644
index 00000000..adcbe3f5
--- /dev/null
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -0,0 +1,1586 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+
+/* Verbs header. */
+/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
+#ifdef PEDANTIC
+#pragma GCC diagnostic ignored "-Wpedantic"
+#endif
+#include <infiniband/verbs.h>
+#ifdef PEDANTIC
+#pragma GCC diagnostic error "-Wpedantic"
+#endif
+
+#include <rte_ethdev.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_malloc.h>
+
+#include "mlx5.h"
+#include "mlx5_prm.h"
+
+/* Number of Work Queues necessary for the DROP queue. */
+#define MLX5_DROP_WQ_N 4
+
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
+ struct ibv_exp_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
+ struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+ struct ibv_exp_flow *ibv_flow; /**< Verbs flow. */
+ struct ibv_exp_wq *wq; /**< Verbs work queue. */
+ struct ibv_cq *cq; /**< Verbs completion queue. */
+ uint16_t rxqs_n; /**< Number of queues in this flow, 0 if drop queue. */
+ uint32_t mark:1; /**< Set if the flow is marked. */
+ uint32_t drop:1; /**< Drop queue. */
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+ struct rxq *rxqs[]; /**< Pointer to the queues array. */
+};
+
+/** Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/** Structure to generate a simple graph of layers supported by the NIC. */
+struct mlx5_flow_items {
+ /** List of possible actions for these items. */
+ const enum rte_flow_action_type *const actions;
+ /** Bit-masks corresponding to the possibilities for the item. */
+ const void *mask;
+ /**
+ * Default bit-masks to use when item->mask is not provided. When
+ * \default_mask is also NULL, the full supported bit-mask (\mask) is
+ * used instead.
+ */
+ const void *default_mask;
+ /** Bit-masks size in bytes. */
+ const unsigned int mask_sz;
+ /**
+ * Conversion function from rte_flow to NIC specific flow.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param default_mask
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data);
+ /** Size in bytes of the destination structure. */
+ const unsigned int dst_sz;
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+/** Valid actions for this PMD. */
+static const enum rte_flow_action_type valid_actions[] = {
+ RTE_FLOW_ACTION_TYPE_DROP,
+ RTE_FLOW_ACTION_TYPE_QUEUE,
+ RTE_FLOW_ACTION_TYPE_MARK,
+ RTE_FLOW_ACTION_TYPE_FLAG,
+ RTE_FLOW_ACTION_TYPE_END,
+};
+
+/** Graph of supported items and associated actions. */
+static const struct mlx5_flow_items mlx5_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
+ RTE_FLOW_ITEM_TYPE_VXLAN),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = -1,
+ },
+ .default_mask = &rte_flow_item_eth_mask,
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .convert = mlx5_flow_create_eth,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_eth),
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vlan){
+ .tci = -1,
+ },
+ .default_mask = &rte_flow_item_vlan_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .convert = mlx5_flow_create_vlan,
+ .dst_sz = 0,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ .type_of_service = -1,
+ .next_proto_id = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .convert = mlx5_flow_create_ipv4,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv4_ext),
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_ipv6){
+ .hdr = {
+ .src_addr = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ },
+ .dst_addr = {
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ },
+ .vtc_flow = -1,
+ .proto = -1,
+ .hop_limits = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_ipv6_mask,
+ .mask_sz = sizeof(struct rte_flow_item_ipv6),
+ .convert = mlx5_flow_create_ipv6,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_ipv6_ext),
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_udp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .convert = mlx5_flow_create_udp,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .default_mask = &rte_flow_item_tcp_mask,
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .convert = mlx5_flow_create_tcp,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_tcp_udp),
+ },
+ [RTE_FLOW_ITEM_TYPE_VXLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ .actions = valid_actions,
+ .mask = &(const struct rte_flow_item_vxlan){
+ .vni = "\xff\xff\xff",
+ },
+ .default_mask = &rte_flow_item_vxlan_mask,
+ .mask_sz = sizeof(struct rte_flow_item_vxlan),
+ .convert = mlx5_flow_create_vxlan,
+ .dst_sz = sizeof(struct ibv_exp_flow_spec_tunnel),
+ },
+};
+
+/** Structure to pass to the conversion function. */
+struct mlx5_flow {
+ struct ibv_exp_flow_attr *ibv_attr; /**< Verbs attribute. */
+ unsigned int offset; /**< Offset in bytes in the ibv_attr buffer. */
+ uint32_t inner; /**< Set once VXLAN is encountered. */
+ uint64_t hash_fields; /**< Fields that participate in the hash. */
+};
+
+/** Structure for Drop queue. */
+struct rte_flow_drop {
+ struct ibv_exp_rwq_ind_table *ind_table; /**< Indirection table. */
+ struct ibv_qp *qp; /**< Verbs queue pair. */
+ struct ibv_exp_wq *wqs[MLX5_DROP_WQ_N]; /**< Verbs work queue. */
+ struct ibv_cq *cq; /**< Verbs completion queue. */
+};
+
+struct mlx5_flow_action {
+ uint32_t queue:1; /**< Target is a receive queue. */
+ uint32_t drop:1; /**< Target is a drop queue. */
+ uint32_t mark:1; /**< Mark is present in the flow. */
+ uint32_t mark_id; /**< Mark identifier. */
+ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queue indexes to use. */
+ uint16_t queues_n; /**< Number of entries in queues[]. */
+};
+
+/**
+ * Check support for a given item.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param mask[in]
+ * Bit-masks covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param size
+ * Bit-mask size in bytes.
+ *
+ * @return
+ * 0 if the item is supported, nonzero otherwise.
+ */
+static int
+mlx5_flow_item_validate(const struct rte_flow_item *item,
+ const uint8_t *mask, unsigned int size)
+{
+ int ret = 0;
+
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->mask;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | mask[i]) != mask[i])
+ return -1;
+ }
+ if (item->spec && item->last) {
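+ /*
+ * Ranges are not supported: the bytes of spec and last must be
+ * identical once the mask has been applied.
+ */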
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ * @param[in, out] action
+ * Action structure to update.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_validate(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct mlx5_flow *flow,
+ struct mlx5_flow_action *action)
+{
+ const struct mlx5_flow_items *cur_item = mlx5_flow_items;
+
+ (void)priv;
+ if (attr->group) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ return -rte_errno;
+ }
+ if (!attr->ingress) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "only ingress is supported");
+ return -rte_errno;
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct mlx5_flow_items *token = NULL;
+ unsigned int i;
+ int err;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &mlx5_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = mlx5_flow_item_validate(items,
+ (const uint8_t *)cur_item->mask,
+ cur_item->mask_sz);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow->ibv_attr && cur_item->convert) {
+ err = cur_item->convert(items,
+ (cur_item->default_mask ?
+ cur_item->default_mask :
+ cur_item->mask),
+ flow);
+ if (err)
+ goto exit_item_not_supported;
+ } else if (items->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
+ if (flow->inner) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "cannot recognize multiple"
+ " VXLAN encapsulations");
+ return -rte_errno;
+ }
+ flow->inner = 1;
+ }
+ flow->offset += cur_item->dst_sz;
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ action->drop = 1;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ uint16_t n;
+ uint16_t found = 0;
+
+ if (!queue || (queue->index > (priv->rxqs_n - 1)))
+ goto exit_action_not_supported;
+ for (n = 0; n < action->queues_n; ++n) {
+ if (action->queues[n] == queue->index) {
+ found = 1;
+ break;
+ }
+ }
+ if (action->queues_n > 1 && !found) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue action not in RSS queues");
+ return -rte_errno;
+ }
+ if (!found) {
+ action->queue = 1;
+ action->queues_n = 1;
+ action->queues[0] = queue->index;
+ }
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ const struct rte_flow_action_rss *rss =
+ (const struct rte_flow_action_rss *)
+ actions->conf;
+ uint16_t n;
+
+ if (!rss || !rss->num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no valid queues");
+ return -rte_errno;
+ }
+ if (action->queues_n == 1) {
+ uint16_t found = 0;
+
+ assert(action->queues_n);
+ for (n = 0; n < rss->num; ++n) {
+ if (action->queues[0] ==
+ rss->queue[n]) {
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue action not in RSS"
+ " queues");
+ return -rte_errno;
+ }
+ }
+ for (n = 0; n < rss->num; ++n) {
+ if (rss->queue[n] >= priv->rxqs_n) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "queue id > number of"
+ " queues");
+ return -rte_errno;
+ }
+ }
+ action->queue = 1;
+ for (n = 0; n < rss->num; ++n)
+ action->queues[n] = rss->queue[n];
+ action->queues_n = rss->num;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_MARK) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (!mark) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark must be defined");
+ return -rte_errno;
+ } else if (mark->id >= MLX5_FLOW_MARK_MAX) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "mark must be between 0"
+ " and 16777199");
+ return -rte_errno;
+ }
+ action->mark = 1;
+ action->mark_id = mark->id;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
+ action->mark = 1;
+ } else {
+ goto exit_action_not_supported;
+ }
+ }
+ if (action->mark && !flow->ibv_attr && !action->drop)
+ flow->offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+ if (!action->queue && !action->drop) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "no valid action");
+ return -rte_errno;
+ }
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+/**
+ * Validate a flow supported by the NIC.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ int ret;
+ struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr) };
+ struct mlx5_flow_action action = {
+ .queue = 0,
+ .drop = 0,
+ .mark = 0,
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ .queues_n = 0,
+ };
+
+ priv_lock(priv);
+ ret = priv_flow_validate(priv, attr, items, actions, error, &flow,
+ &action);
+ priv_unlock(priv);
+ return ret;
+}
+
+/**
+ * Convert Ethernet item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_eth(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+ unsigned int i;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 2;
+ flow->hash_fields = 0;
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *eth = (struct ibv_exp_flow_spec_eth) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_ETH,
+ .size = eth_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth->val.ether_type = spec->type;
+ memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(eth->mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth->mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
+ eth->val.src_mac[i] &= eth->mask.src_mac[i];
+ }
+ eth->val.ether_type &= eth->mask.ether_type;
+ return 0;
+}
+
+/**
+ * Convert VLAN item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_vlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_eth *eth;
+ const unsigned int eth_size = sizeof(struct ibv_exp_flow_spec_eth);
+
+ eth = (void *)((uintptr_t)flow->ibv_attr + flow->offset - eth_size);
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ eth->val.vlan_tag = spec->tci;
+ eth->mask.vlan_tag = mask->tci;
+ eth->val.vlan_tag &= eth->mask.vlan_tag;
+ return 0;
+}
+
+/**
+ * Convert IPv4 item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_ipv4(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_ipv4_ext *ipv4;
+ unsigned int ipv4_size = sizeof(struct ibv_exp_flow_spec_ipv4_ext);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 1;
+ flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
+ IBV_EXP_RX_HASH_DST_IPV4);
+ ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *ipv4 = (struct ibv_exp_flow_spec_ipv4_ext) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV4_EXT,
+ .size = ipv4_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ ipv4->val = (struct ibv_exp_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4->mask = (struct ibv_exp_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4->val.src_ip &= ipv4->mask.src_ip;
+ ipv4->val.dst_ip &= ipv4->mask.dst_ip;
+ ipv4->val.proto &= ipv4->mask.proto;
+ ipv4->val.tos &= ipv4->mask.tos;
+ return 0;
+}
+
+/**
+ * Convert IPv6 item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_ipv6(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_ipv6_ext *ipv6;
+ unsigned int ipv6_size = sizeof(struct ibv_exp_flow_spec_ipv6_ext);
+ unsigned int i;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 1;
+ flow->hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+ IBV_EXP_RX_HASH_DST_IPV6);
+ ipv6 = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *ipv6 = (struct ibv_exp_flow_spec_ipv6_ext) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_IPV6_EXT,
+ .size = ipv6_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ memcpy(ipv6->val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6->val.src_ip));
+ memcpy(ipv6->val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6->val.dst_ip));
+ memcpy(ipv6->mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6->mask.src_ip));
+ memcpy(ipv6->mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6->mask.dst_ip));
+ ipv6->mask.flow_label = mask->hdr.vtc_flow;
+ ipv6->mask.next_hdr = mask->hdr.proto;
+ ipv6->mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6->val.src_ip); ++i) {
+ ipv6->val.src_ip[i] &= ipv6->mask.src_ip[i];
+ ipv6->val.dst_ip[i] &= ipv6->mask.dst_ip[i];
+ }
+ ipv6->val.flow_label &= ipv6->mask.flow_label;
+ ipv6->val.next_hdr &= ipv6->mask.next_hdr;
+ ipv6->val.hop_limit &= ipv6->mask.hop_limit;
+ return 0;
+}
+
+/**
+ * Convert UDP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_udp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_tcp_udp *udp;
+ unsigned int udp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_UDP |
+ IBV_EXP_RX_HASH_DST_PORT_UDP);
+ udp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *udp = (struct ibv_exp_flow_spec_tcp_udp) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_UDP,
+ .size = udp_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ udp->val.dst_port = spec->hdr.dst_port;
+ udp->val.src_port = spec->hdr.src_port;
+ udp->mask.dst_port = mask->hdr.dst_port;
+ udp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp->val.src_port &= udp->mask.src_port;
+ udp->val.dst_port &= udp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Convert TCP item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_tcp(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_tcp_udp *tcp;
+ unsigned int tcp_size = sizeof(struct ibv_exp_flow_spec_tcp_udp);
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ flow->hash_fields |= (IBV_EXP_RX_HASH_SRC_PORT_TCP |
+ IBV_EXP_RX_HASH_DST_PORT_TCP);
+ tcp = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *tcp = (struct ibv_exp_flow_spec_tcp_udp) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_TCP,
+ .size = tcp_size,
+ };
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ tcp->val.dst_port = spec->hdr.dst_port;
+ tcp->val.src_port = spec->hdr.src_port;
+ tcp->mask.dst_port = mask->hdr.dst_port;
+ tcp->mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp->val.src_port &= tcp->mask.src_port;
+ tcp->val.dst_port &= tcp->mask.dst_port;
+ return 0;
+}
+
+/**
+ * Convert VXLAN item to Verbs specification.
+ *
+ * @param item[in]
+ * Item specification.
+ * @param default_mask[in]
+ * Default bit-masks to use when item->mask is not provided.
+ * @param data[in, out]
+ * User structure.
+ */
+static int
+mlx5_flow_create_vxlan(const struct rte_flow_item *item,
+ const void *default_mask,
+ void *data)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ struct mlx5_flow *flow = (struct mlx5_flow *)data;
+ struct ibv_exp_flow_spec_tunnel *vxlan;
+ unsigned int size = sizeof(struct ibv_exp_flow_spec_tunnel);
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id;
+
+ ++flow->ibv_attr->num_of_specs;
+ flow->ibv_attr->priority = 0;
+ id.vni[0] = 0;
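+ /*
+ * The 24-bit VNI fills bytes 1-3 of the 32-bit tunnel ID below;
+ * byte 0 is kept at zero.
+ */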
+ vxlan = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *vxlan = (struct ibv_exp_flow_spec_tunnel) {
+ .type = flow->inner | IBV_EXP_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ flow->inner = IBV_EXP_FLOW_SPEC_INNER;
+ if (!spec)
+ return 0;
+ if (!mask)
+ mask = default_mask;
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan->val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan->mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan->val.tunnel_id &= vxlan->mask.tunnel_id;
+ return 0;
+}
+
+/**
+ * Convert mark/flag action to Verbs specification.
+ *
+ * @param flow
+ * Pointer to MLX5 flow structure.
+ * @param mark_id
+ * Mark identifier.
+ */
+static int
+mlx5_flow_create_flag_mark(struct mlx5_flow *flow, uint32_t mark_id)
+{
+ struct ibv_exp_flow_spec_action_tag *tag;
+ unsigned int size = sizeof(struct ibv_exp_flow_spec_action_tag);
+
+ tag = (void *)((uintptr_t)flow->ibv_attr + flow->offset);
+ *tag = (struct ibv_exp_flow_spec_action_tag){
+ .type = IBV_EXP_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ .tag_id = mlx5_flow_mark_set(mark_id),
+ };
+ ++flow->ibv_attr->num_of_specs;
+ return 0;
+}
+
+/**
+ * Complete flow rule creation with a drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param flow
+ * MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue_drop(struct priv *priv,
+ struct mlx5_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow), 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ rte_flow->drop = 1;
+ rte_flow->ibv_attr = flow->ibv_attr;
+ rte_flow->qp = priv->flow_drop_queue->qp;
+ if (!priv->started)
+ return rte_flow;
+ rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+ rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+error:
+ assert(rte_flow);
+ rte_free(rte_flow);
+ return NULL;
+}
+
+/**
+ * Complete flow rule creation.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param flow
+ * MLX5 flow attributes (filled by mlx5_flow_validate()).
+ * @param action
+ * Target action structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created.
+ */
+static struct rte_flow *
+priv_flow_create_action_queue(struct priv *priv,
+ struct mlx5_flow *flow,
+ struct mlx5_flow_action *action,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ unsigned int i;
+ unsigned int j;
+ const unsigned int wqs_n = 1 << log2above(action->queues_n);
+ struct ibv_exp_wq *wqs[wqs_n];
+
+ assert(priv->pd);
+ assert(priv->ctx);
+ assert(!action->drop);
+ rte_flow = rte_calloc(__func__, 1, sizeof(*rte_flow) +
+ sizeof(*rte_flow->rxqs) * action->queues_n, 0);
+ if (!rte_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate flow memory");
+ return NULL;
+ }
+ for (i = 0; i < action->queues_n; ++i) {
+ struct rxq_ctrl *rxq;
+
+ rxq = container_of((*priv->rxqs)[action->queues[i]],
+ struct rxq_ctrl, rxq);
+ wqs[i] = rxq->wq;
+ rte_flow->rxqs[i] = &rxq->rxq;
+ ++rte_flow->rxqs_n;
+ rxq->rxq.mark |= action->mark;
+ }
+ /*
+ * Finalise the indirection table: entries beyond the configured
+ * queues, up to the power-of-two table size, are filled by cycling
+ * through the configured queues again.
+ */
+ for (j = 0; i < wqs_n; ++i, ++j) {
+ wqs[i] = wqs[j];
+ if (j == action->queues_n)
+ j = 0;
+ }
+ rte_flow->mark = action->mark;
+ rte_flow->ibv_attr = flow->ibv_attr;
+ rte_flow->hash_fields = flow->hash_fields;
+ rte_flow->ind_table = ibv_exp_create_rwq_ind_table(
+ priv->ctx,
+ &(struct ibv_exp_rwq_ind_table_init_attr){
+ .pd = priv->pd,
+ .log_ind_tbl_size = log2above(action->queues_n),
+ .ind_tbl = wqs,
+ .comp_mask = 0,
+ });
+ if (!rte_flow->ind_table) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate indirection table");
+ goto error;
+ }
+ rte_flow->qp = ibv_exp_create_qp(
+ priv->ctx,
+ &(struct ibv_exp_qp_init_attr){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_PORT |
+ IBV_EXP_QP_INIT_ATTR_RX_HASH,
+ .pd = priv->pd,
+ .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+ .rx_hash_function =
+ IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_hash_default_key_len,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = rte_flow->hash_fields,
+ .rwq_ind_tbl = rte_flow->ind_table,
+ },
+ .port_num = priv->port,
+ });
+ if (!rte_flow->qp) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate QP");
+ goto error;
+ }
+ if (!priv->started)
+ return rte_flow;
+ rte_flow->ibv_flow = ibv_exp_create_flow(rte_flow->qp,
+ rte_flow->ibv_attr);
+ if (!rte_flow->ibv_flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "flow rule creation failure");
+ goto error;
+ }
+ return rte_flow;
+error:
+ assert(rte_flow);
+ if (rte_flow->qp)
+ ibv_destroy_qp(rte_flow->qp);
+ if (rte_flow->ind_table)
+ ibv_exp_destroy_rwq_ind_table(rte_flow->ind_table);
+ rte_free(rte_flow);
+ return NULL;
+}
+
+/**
+ * Convert a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] items
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow on success, NULL otherwise.
+ */
+static struct rte_flow *
+priv_flow_create(struct priv *priv,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow *rte_flow;
+ struct mlx5_flow flow = { .offset = sizeof(struct ibv_exp_flow_attr), };
+ struct mlx5_flow_action action = {
+ .queue = 0,
+ .drop = 0,
+ .mark = 0,
+ .mark_id = MLX5_FLOW_MARK_DEFAULT,
+ .queues_n = 0,
+ };
+ int err;
+
+ err = priv_flow_validate(priv, attr, items, actions, error, &flow,
+ &action);
+ if (err)
+ goto exit;
+ flow.ibv_attr = rte_malloc(__func__, flow.offset, 0);
+ flow.offset = sizeof(struct ibv_exp_flow_attr);
+ if (!flow.ibv_attr) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate ibv_attr memory");
+ goto exit;
+ }
+ *flow.ibv_attr = (struct ibv_exp_flow_attr){
+ .type = IBV_EXP_FLOW_ATTR_NORMAL,
+ .size = sizeof(struct ibv_exp_flow_attr),
+ .priority = attr->priority,
+ .num_of_specs = 0,
+ .port = 0,
+ .flags = 0,
+ .reserved = 0,
+ };
+ flow.inner = 0;
+ flow.hash_fields = 0;
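+ /*
+ * Second validation pass: this time ibv_attr is allocated, so the
+ * per-item convert() callbacks fill in the Verbs specifications.
+ */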
+ claim_zero(priv_flow_validate(priv, attr, items, actions,
+ error, &flow, &action));
+ if (action.mark && !action.drop) {
+ mlx5_flow_create_flag_mark(&flow, action.mark_id);
+ flow.offset += sizeof(struct ibv_exp_flow_spec_action_tag);
+ }
+ if (action.drop)
+ rte_flow =
+ priv_flow_create_action_queue_drop(priv, &flow, error);
+ else
+ rte_flow = priv_flow_create_action_queue(priv, &flow, &action,
+ error);
+ if (!rte_flow)
+ goto exit;
+ return rte_flow;
+exit:
+ rte_free(flow.ibv_attr);
+ return NULL;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+struct rte_flow *
+mlx5_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ priv_lock(priv);
+ flow = priv_flow_create(priv, attr, items, actions, error);
+ if (flow) {
+ LIST_INSERT_HEAD(&priv->flows, flow, next);
+ DEBUG("Flow created %p", (void *)flow);
+ }
+ priv_unlock(priv);
+ return flow;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[in] flow
+ * Flow to destroy.
+ */
+static void
+priv_flow_destroy(struct priv *priv,
+ struct rte_flow *flow)
+{
+ (void)priv;
+ LIST_REMOVE(flow, next);
+ if (flow->ibv_flow)
+ claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+ if (flow->drop)
+ goto free;
+ if (flow->qp)
+ claim_zero(ibv_destroy_qp(flow->qp));
+ if (flow->ind_table)
+ claim_zero(ibv_exp_destroy_rwq_ind_table(flow->ind_table));
+ if (flow->drop && flow->wq)
+ claim_zero(ibv_exp_destroy_wq(flow->wq));
+ if (flow->drop && flow->cq)
+ claim_zero(ibv_destroy_cq(flow->cq));
+ if (flow->mark) {
+ struct rte_flow *tmp;
+ struct rxq *rxq;
+ uint32_t mark_n = 0;
+ uint32_t queue_n;
+
+ /*
+ * To remove the mark from the queue, the queue must not be
+ * present in any other marked flow (RSS or not).
+ */
+ for (queue_n = 0; queue_n < flow->rxqs_n; ++queue_n) {
+ rxq = flow->rxqs[queue_n];
+ for (tmp = LIST_FIRST(&priv->flows);
+ tmp;
+ tmp = LIST_NEXT(tmp, next)) {
+ uint32_t tqueue_n;
+
+ if (tmp->drop)
+ continue;
+ for (tqueue_n = 0;
+ tqueue_n < tmp->rxqs_n;
+ ++tqueue_n) {
+ struct rxq *trxq;
+
+ trxq = tmp->rxqs[tqueue_n];
+ if (rxq == trxq)
+ ++mark_n;
+ }
+ }
+ rxq->mark = !!mark_n;
+ }
+ }
+free:
+ rte_free(flow->ibv_attr);
+ DEBUG("Flow destroyed %p", (void *)flow);
+ rte_free(flow);
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_destroy(priv, flow);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_flush(struct priv *priv)
+{
+ while (!LIST_EMPTY(&priv->flows)) {
+ struct rte_flow *flow;
+
+ flow = LIST_FIRST(&priv->flows);
+ priv_flow_destroy(priv, flow);
+ }
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+mlx5_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+
+ (void)error;
+ priv_lock(priv);
+ priv_flow_flush(priv);
+ priv_unlock(priv);
+ return 0;
+}
+
+/**
+ * Create drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+static int
+priv_flow_create_drop_queue(struct priv *priv)
+{
+ struct rte_flow_drop *fdq = NULL;
+ unsigned int i;
+
+ assert(priv->pd);
+ assert(priv->ctx);
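+ /*
+ * The drop queue is a regular hash RX QP; no receive buffers are
+ * ever posted on its work queues, so traffic steered to it is
+ * dropped.
+ */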
+ fdq = rte_calloc(__func__, 1, sizeof(*fdq), 0);
+ if (!fdq) {
+ WARN("cannot allocate memory for drop queue");
+ goto error;
+ }
+ fdq->cq = ibv_exp_create_cq(priv->ctx, 1, NULL, NULL, 0,
+ &(struct ibv_exp_cq_init_attr){
+ .comp_mask = 0,
+ });
+ if (!fdq->cq) {
+ WARN("cannot allocate CQ for drop queue");
+ goto error;
+ }
+ for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+ fdq->wqs[i] = ibv_exp_create_wq(priv->ctx,
+ &(struct ibv_exp_wq_init_attr){
+ .wq_type = IBV_EXP_WQT_RQ,
+ .max_recv_wr = 1,
+ .max_recv_sge = 1,
+ .pd = priv->pd,
+ .cq = fdq->cq,
+ });
+ if (!fdq->wqs[i]) {
+ WARN("cannot allocate WQ for drop queue");
+ goto error;
+ }
+ }
+ fdq->ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
+ &(struct ibv_exp_rwq_ind_table_init_attr){
+ .pd = priv->pd,
+ .log_ind_tbl_size = 0,
+ .ind_tbl = fdq->wqs,
+ .comp_mask = 0,
+ });
+ if (!fdq->ind_table) {
+ WARN("cannot allocate indirection table for drop queue");
+ goto error;
+ }
+ fdq->qp = ibv_exp_create_qp(priv->ctx,
+ &(struct ibv_exp_qp_init_attr){
+ .qp_type = IBV_QPT_RAW_PACKET,
+ .comp_mask =
+ IBV_EXP_QP_INIT_ATTR_PD |
+ IBV_EXP_QP_INIT_ATTR_PORT |
+ IBV_EXP_QP_INIT_ATTR_RX_HASH,
+ .pd = priv->pd,
+ .rx_hash_conf = &(struct ibv_exp_rx_hash_conf){
+ .rx_hash_function =
+ IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
+ .rx_hash_key_len = rss_hash_default_key_len,
+ .rx_hash_key = rss_hash_default_key,
+ .rx_hash_fields_mask = 0,
+ .rwq_ind_tbl = fdq->ind_table,
+ },
+ .port_num = priv->port,
+ });
+ if (!fdq->qp) {
+ WARN("cannot allocate QP for drop queue");
+ goto error;
+ }
+ priv->flow_drop_queue = fdq;
+ return 0;
+error:
+ /* fdq may still be NULL if the initial allocation failed. */
+ if (fdq) {
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+ for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+ if (fdq->wqs[i])
+ claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
+ }
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ }
+ priv->flow_drop_queue = NULL;
+ return -1;
+}
+
+/**
+ * Delete drop queue.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_flow_delete_drop_queue(struct priv *priv)
+{
+ struct rte_flow_drop *fdq = priv->flow_drop_queue;
+ unsigned int i;
+
+ if (!fdq)
+ return;
+ if (fdq->qp)
+ claim_zero(ibv_destroy_qp(fdq->qp));
+ if (fdq->ind_table)
+ claim_zero(ibv_exp_destroy_rwq_ind_table(fdq->ind_table));
+ for (i = 0; i != MLX5_DROP_WQ_N; ++i) {
+ if (fdq->wqs[i])
+ claim_zero(ibv_exp_destroy_wq(fdq->wqs[i]));
+ }
+ if (fdq->cq)
+ claim_zero(ibv_destroy_cq(fdq->cq));
+ rte_free(fdq);
+ priv->flow_drop_queue = NULL;
+}
+
+/**
+ * Remove all flows.
+ *
+ * Called by dev_stop() to remove all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_flow_stop(struct priv *priv)
+{
+ struct rte_flow *flow;
+
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ claim_zero(ibv_exp_destroy_flow(flow->ibv_flow));
+ flow->ibv_flow = NULL;
+ if (flow->mark) {
+ unsigned int n;
+
+ for (n = 0; n < flow->rxqs_n; ++n)
+ flow->rxqs[n]->mark = 0;
+ }
+ DEBUG("Flow %p removed", (void *)flow);
+ }
+ priv_flow_delete_drop_queue(priv);
+}
+
+/**
+ * Add all flows.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * 0 on success, an errno value otherwise and rte_errno is set.
+ */
+int
+priv_flow_start(struct priv *priv)
+{
+ int ret;
+ struct rte_flow *flow;
+
+ ret = priv_flow_create_drop_queue(priv);
+ if (ret)
+ return -1;
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ struct ibv_qp *qp;
+
+ if (flow->drop)
+ qp = priv->flow_drop_queue->qp;
+ else
+ qp = flow->qp;
+ flow->ibv_flow = ibv_exp_create_flow(qp, flow->ibv_attr);
+ if (!flow->ibv_flow) {
+ DEBUG("Flow %p cannot be applied", (void *)flow);
+ rte_errno = EINVAL;
+ return rte_errno;
+ }
+ DEBUG("Flow %p applied", (void *)flow);
+ if (flow->mark) {
+ unsigned int n;
+
+ for (n = 0; n < flow->rxqs_n; ++n)
+ flow->rxqs[n]->mark = 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Verify if the Rx queue is used in a flow.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param rxq
+ * Pointer to the queue to search.
+ *
+ * @return
+ * Nonzero if the queue is used by a flow.
+ */
+int
+priv_flow_rxq_in_use(struct priv *priv, struct rxq *rxq)
+{
+ struct rte_flow *flow;
+
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ unsigned int n;
+
+ if (flow->drop)
+ continue;
+ for (n = 0; n < flow->rxqs_n; ++n) {
+ if (flow->rxqs[n] == rxq)
+ return 1;
+ }
+ }
+ return 0;
+}
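/*
 * Illustrative sketch, not part of the patch: how an application reaches
 * the mlx5_flow_create()/mlx5_flow_destroy() callbacks above through the
 * generic rte_flow API. The port_id, the ETH/IPV4/UDP pattern, the mark
 * value and the target queue are hypothetical; error handling is minimal.
 */
#include <rte_flow.h>

static int
example_mark_udp_flow(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 42 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	/* Dispatched to mlx5_flow_create() -> priv_flow_create(). */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (flow == NULL)
		return -1;
	/* Dispatched to mlx5_flow_destroy() -> priv_flow_destroy(). */
	return rte_flow_destroy(port_id, flow, &error);
}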
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index 4fcfd3b8..79e0c410 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -470,26 +470,30 @@ priv_mac_addrs_enable(struct priv *priv)
* @param vmdq
* VMDq pool index to associate address with (ignored).
*/
-void
+int
mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq)
{
struct priv *priv = dev->data->dev_private;
+ int re;
if (mlx5_is_secondary())
- return;
+ return -ENOTSUP;
(void)vmdq;
priv_lock(priv);
DEBUG("%p: adding MAC address at index %" PRIu32,
(void *)dev, index);
- if (index >= RTE_DIM(priv->mac))
+ if (index >= RTE_DIM(priv->mac)) {
+ re = EINVAL;
goto end;
- priv_mac_addr_add(priv, index,
- (const uint8_t (*)[ETHER_ADDR_LEN])
- mac_addr->addr_bytes);
+ }
+ re = priv_mac_addr_add(priv, index,
+ (const uint8_t (*)[ETHER_ADDR_LEN])
+ mac_addr->addr_bytes);
end:
priv_unlock(priv);
+ return -re;
}
/**
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index ed088eea..608072f7 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -34,6 +34,8 @@
#ifndef RTE_PMD_MLX5_PRM_H_
#define RTE_PMD_MLX5_PRM_H_
+#include <assert.h>
+
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
@@ -44,6 +46,7 @@
#pragma GCC diagnostic error "-Wpedantic"
#endif
+#include <rte_vect.h>
#include "mlx5_autoconf.h"
/* Get CQE owner bit. */
@@ -70,6 +73,9 @@
/* WQE size */
#define MLX5_WQE_SIZE (4 * MLX5_WQE_DWORD_SIZE)
+/* Max size of a WQE session. */
+#define MLX5_WQE_SIZE_MAX 960U
+
/* Compute the number of DS. */
#define MLX5_WQE_DS(n) \
(((n) + MLX5_WQE_DWORD_SIZE - 1) / MLX5_WQE_DWORD_SIZE)
@@ -77,10 +83,19 @@
/* Room for inline data in multi-packet WQE. */
#define MLX5_MWQE64_INL_DATA 28
+/* Default minimum number of Tx queues for inlining packets. */
+#define MLX5_EMPW_MIN_TXQS 8
+
+/* Default max packet length to be inlined. */
+#define MLX5_EMPW_MAX_INLINE_LEN (4U * MLX5_WQE_SIZE)
+
#ifndef HAVE_VERBS_MLX5_OPCODE_TSO
#define MLX5_OPCODE_TSO MLX5_OPCODE_LSO_MPW /* Compat with OFED 3.3. */
#endif
+#define MLX5_OPC_MOD_ENHANCED_MPSW 0
+#define MLX5_OPCODE_ENHANCED_MPSW 0x29
+
/* CQE value to inform that VLAN is stripped. */
#define MLX5_CQE_VLAN_STRIPPED (1u << 0)
@@ -117,6 +132,28 @@
/* Tunnel packet bit in the CQE. */
#define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
+/* Is flow mark valid. */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff00)
+#else
+#define MLX5_FLOW_MARK_IS_VALID(val) ((val) & 0xffffff)
+#endif
+
+/* INVALID is used by packets matching no flow rules. */
+#define MLX5_FLOW_MARK_INVALID 0
+
+/* Maximum allowed value to mark a packet. */
+#define MLX5_FLOW_MARK_MAX 0xfffff0
+
+/* Default mark value used when none is provided. */
+#define MLX5_FLOW_MARK_DEFAULT 0xffffff
+
/* Subset of struct mlx5_wqe_eth_seg. */
struct mlx5_wqe_eth_seg_small {
uint32_t rsvd0;
@@ -126,12 +163,19 @@ struct mlx5_wqe_eth_seg_small {
uint32_t rsvd2;
uint16_t inline_hdr_sz;
uint8_t inline_hdr[2];
-};
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
struct mlx5_wqe_inl_small {
uint32_t byte_cnt;
uint8_t raw;
-};
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
+
+struct mlx5_wqe_ctrl {
+ uint32_t ctrl0;
+ uint32_t ctrl1;
+ uint32_t ctrl2;
+ uint32_t ctrl3;
+} __rte_aligned(MLX5_WQE_DWORD_SIZE);
/* Small common part of the WQE. */
struct mlx5_wqe {
@@ -139,16 +183,30 @@ struct mlx5_wqe {
struct mlx5_wqe_eth_seg_small eseg;
};
+/* Vectorize WQE header. */
+struct mlx5_wqe_v {
+ rte_v128u32_t ctrl;
+ rte_v128u32_t eseg;
+};
+
/* WQE. */
struct mlx5_wqe64 {
struct mlx5_wqe hdr;
uint8_t raw[32];
-} __rte_aligned(64);
+} __rte_aligned(MLX5_WQE_SIZE);
+
+/* MPW mode. */
+enum mlx5_mpw_mode {
+ MLX5_MPW_DISABLED,
+ MLX5_MPW,
+ MLX5_MPW_ENHANCED, /* Enhanced Multi-Packet Send WQE, a.k.a MPWv2. */
+};
/* MPW session status. */
enum mlx5_mpw_state {
MLX5_MPW_STATE_OPENED,
MLX5_MPW_INL_STATE_OPENED,
+ MLX5_MPW_ENHANCED_STATE_OPENED,
MLX5_MPW_STATE_CLOSED,
};
@@ -180,10 +238,68 @@ struct mlx5_cqe {
uint8_t rsvd2[12];
uint32_t byte_cnt;
uint64_t timestamp;
- uint8_t rsvd3[4];
+ uint32_t sop_drop_qpn;
uint16_t wqe_counter;
uint8_t rsvd4;
uint8_t op_own;
};
+/**
+ * Convert a user mark to flow mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_set(uint32_t val)
+{
+ uint32_t ret;
+
+ /*
+ * Add one to the user value to differentiate un-marked flows from
+ * marked flows; if the ID is equal to MLX5_FLOW_MARK_DEFAULT, it
+ * remains untouched.
+ */
+ if (val != MLX5_FLOW_MARK_DEFAULT)
+ ++val;
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ /*
+ * Mark is 24 bits (minus reserved values) but is stored on a 32 bit
+ * word, byte-swapped by the kernel on little-endian systems. In this
+ * case, left-shifting the resulting big-endian value ensures the
+ * least significant 24 bits are retained when converting it back.
+ */
+ ret = rte_cpu_to_be_32(val) >> 8;
+#else
+ ret = val;
+#endif
+ return ret;
+}
+
+/**
+ * Convert a mark to user mark.
+ *
+ * @param val
+ * Mark value to convert.
+ *
+ * @return
+ * Converted mark value.
+ */
+static inline uint32_t
+mlx5_flow_mark_get(uint32_t val)
+{
+ /*
+ * Subtract one from the retrieved value. It was added by
+ * mlx5_flow_mark_set() to distinguish unmarked flows.
+ */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ return (val >> 8) - 1;
+#else
+ return val - 1;
+#endif
+}
+
#endif /* RTE_PMD_MLX5_PRM_H_ */
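/*
 * Illustrative sketch, not part of the patch: once mlx5_rx_burst() has
 * validated the CQE mark with MLX5_FLOW_MARK_IS_VALID() and translated it
 * with mlx5_flow_mark_get(), the application sees it as a regular FDIR ID
 * in the mbuf. The function name and burst handling below are hypothetical.
 */
#include <inttypes.h>
#include <stdio.h>
#include <rte_mbuf.h>

static void
example_read_mark(struct rte_mbuf **pkts, uint16_t nb_rx)
{
	uint16_t i;

	for (i = 0; i < nb_rx; ++i) {
		struct rte_mbuf *m = pkts[i];

		/* PKT_RX_FDIR_ID is only set for marks != MLX5_FLOW_MARK_DEFAULT. */
		if (m->ol_flags & PKT_RX_FDIR_ID)
			printf("packet %" PRIu16 " carries mark %" PRIu32 "\n",
			       i, m->hash.fdir.hi);
	}
}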
diff --git a/drivers/net/mlx5/mlx5_rss.c b/drivers/net/mlx5/mlx5_rss.c
index 0bed74ee..a2dd7d17 100644
--- a/drivers/net/mlx5/mlx5_rss.c
+++ b/drivers/net/mlx5/mlx5_rss.c
@@ -257,13 +257,9 @@ priv_dev_rss_reta_query(struct priv *priv,
{
unsigned int idx;
unsigned int i;
- int ret;
-
- /* See RETA comment in mlx5_dev_infos_get(). */
- ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size);
- if (ret)
- return ret;
+ if (!reta_size || reta_size > priv->reta_idx_n)
+ return EINVAL;
/* Fill each entry of the table even if its bit is not set. */
for (idx = 0, i = 0; (i != reta_size); ++i) {
idx = i / RTE_RETA_GROUP_SIZE;
@@ -296,8 +292,9 @@ priv_dev_rss_reta_update(struct priv *priv,
unsigned int pos;
int ret;
- /* See RETA comment in mlx5_dev_infos_get(). */
- ret = priv_rss_reta_index_resize(priv, priv->ind_table_max_size);
+ if (!reta_size)
+ return EINVAL;
+ ret = priv_rss_reta_index_resize(priv, reta_size);
if (ret)
return ret;
@@ -360,8 +357,11 @@ mlx5_dev_rss_reta_update(struct rte_eth_dev *dev,
int ret;
struct priv *priv = dev->data->dev_private;
+ mlx5_dev_stop(dev);
priv_lock(priv);
ret = priv_dev_rss_reta_update(priv, reta_conf, reta_size);
priv_unlock(priv);
- return -ret;
+ if (ret)
+ return -ret;
+ return mlx5_dev_start(dev);
}
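/*
 * Illustrative sketch, not part of the patch: a RETA update through the
 * generic ethdev API, which the change above now wraps with
 * mlx5_dev_stop()/mlx5_dev_start(). The port_id, the 2-queue spread and
 * the assumption that reta_size is a multiple of RTE_RETA_GROUP_SIZE are
 * hypothetical.
 */
#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>

static int
example_reta_update(uint16_t port_id, uint16_t reta_size)
{
	struct rte_eth_rss_reta_entry64 reta_conf[reta_size / RTE_RETA_GROUP_SIZE];
	unsigned int i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < reta_size; ++i) {
		/* Spread the indirection table over the first two Rx queues. */
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			UINT64_C(1) << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
			i % 2;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
}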
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 28e93d3e..8b782336 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -36,6 +36,7 @@
#include <errno.h>
#include <string.h>
#include <stdint.h>
+#include <fcntl.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -57,6 +58,8 @@
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_debug.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -741,49 +744,16 @@ rxq_free_elts(struct rxq_ctrl *rxq_ctrl)
void
rxq_cleanup(struct rxq_ctrl *rxq_ctrl)
{
- struct ibv_exp_release_intf_params params;
-
DEBUG("cleaning up %p", (void *)rxq_ctrl);
rxq_free_elts(rxq_ctrl);
if (rxq_ctrl->fdir_queue != NULL)
priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
- if (rxq_ctrl->if_wq != NULL) {
- assert(rxq_ctrl->priv != NULL);
- assert(rxq_ctrl->priv->ctx != NULL);
- assert(rxq_ctrl->wq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
- rxq_ctrl->if_wq,
- &params));
- }
- if (rxq_ctrl->if_cq != NULL) {
- assert(rxq_ctrl->priv != NULL);
- assert(rxq_ctrl->priv->ctx != NULL);
- assert(rxq_ctrl->cq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
- rxq_ctrl->if_cq,
- &params));
- }
if (rxq_ctrl->wq != NULL)
claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
if (rxq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
- if (rxq_ctrl->rd != NULL) {
- struct ibv_exp_destroy_res_domain_attr attr = {
- .comp_mask = 0,
- };
-
- assert(rxq_ctrl->priv != NULL);
- assert(rxq_ctrl->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx,
- rxq_ctrl->rd,
- &attr));
- }
+ if (rxq_ctrl->channel != NULL)
+ claim_zero(ibv_destroy_comp_channel(rxq_ctrl->channel));
if (rxq_ctrl->mr != NULL)
claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
@@ -931,13 +901,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
};
struct ibv_exp_wq_attr mod;
union {
- struct ibv_exp_query_intf_params params;
struct ibv_exp_cq_init_attr cq;
- struct ibv_exp_res_domain_init_attr rd;
struct ibv_exp_wq_init_attr wq;
struct ibv_exp_cq_attr cq_attr;
} attr;
- enum ibv_exp_query_intf_status status;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int cqe_n = desc - 1;
struct rte_mbuf *(*elts)[desc] = NULL;
@@ -946,14 +913,10 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
(void)conf; /* Thresholds configuration (ignored). */
/* Enable scattered packets support for this queue if necessary. */
assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- /* If smaller than MRU, multi-segment support must be enabled. */
- if (mb_len < (priv->mtu > dev->data->dev_conf.rxmode.max_rx_pkt_len ?
- dev->data->dev_conf.rxmode.max_rx_pkt_len :
- priv->mtu))
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- (mb_len - RTE_PKTMBUF_HEADROOM))) {
+ if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
+ (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ tmpl.rxq.sges_n = 0;
+ } else if (dev->data->dev_conf.rxmode.enable_scatter) {
unsigned int size =
RTE_PKTMBUF_HEADROOM +
dev->data->dev_conf.rxmode.max_rx_pkt_len;
@@ -976,6 +939,13 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
dev->data->dev_conf.rxmode.max_rx_pkt_len);
return EOVERFLOW;
}
+ } else {
+ WARN("%p: the requested maximum Rx packet size (%u) is"
+ " larger than a single mbuf (%u) and scattered"
+ " mode has not been requested",
+ (void *)dev,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ mb_len - RTE_PKTMBUF_HEADROOM);
}
DEBUG("%p: maximum number of segments per packet: %u",
(void *)dev, 1 << tmpl.rxq.sges_n);
@@ -1001,29 +971,25 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
(void *)dev, strerror(ret));
goto error;
}
- attr.rd = (struct ibv_exp_res_domain_init_attr){
- .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
- IBV_EXP_RES_DOMAIN_MSG_MODEL),
- .thread_model = IBV_EXP_THREAD_SINGLE,
- .msg_model = IBV_EXP_MSG_HIGH_BW,
- };
- tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
- if (tmpl.rd == NULL) {
- ret = ENOMEM;
- ERROR("%p: RD creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ tmpl.channel = ibv_create_comp_channel(priv->ctx);
+ if (tmpl.channel == NULL) {
+ dev->data->dev_conf.intr_conf.rxq = 0;
+ ret = ENOMEM;
+ ERROR("%p: Comp Channel creation failure: %s",
+ (void *)dev, strerror(ret));
+ goto error;
+ }
}
attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
- .res_domain = tmpl.rd,
+ .comp_mask = 0,
};
if (priv->cqe_comp) {
attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
}
- tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0,
+ tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, tmpl.channel, 0,
&attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
@@ -1048,10 +1014,8 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
.pd = priv->pd,
.cq = tmpl.cq,
.comp_mask =
- IBV_EXP_CREATE_WQ_RES_DOMAIN |
IBV_EXP_CREATE_WQ_VLAN_OFFLOADS |
0,
- .res_domain = tmpl.rd,
.vlan_offloads = (tmpl.rxq.vlan_strip ?
IBV_EXP_RECEIVE_WQ_CVLAN_STRIP :
0),
@@ -1112,29 +1076,6 @@ rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
/* Save port ID. */
tmpl.rxq.port_id = dev->data->port_id;
DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id);
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf_version = 1,
- .intf = IBV_EXP_INTF_CQ,
- .obj = tmpl.cq,
- };
- tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_cq == NULL) {
- ERROR("%p: CQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_WQ,
- .obj = tmpl.wq,
- };
- tmpl.if_wq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_wq == NULL) {
- ERROR("%p: WQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
/* Change queue state to ready. */
mod = (struct ibv_exp_wq_attr){
.attr_mask = IBV_EXP_WQ_ATTR_STATE,
@@ -1247,6 +1188,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
(*priv->rxqs)[idx] = NULL;
rxq_cleanup(rxq_ctrl);
+ /* Resize if rxq size is changed. */
+ if (rxq_ctrl->rxq.elts_n != log2above(desc)) {
+ rxq_ctrl = rte_realloc(rxq_ctrl,
+ sizeof(*rxq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE);
+ if (!rxq_ctrl) {
+ ERROR("%p: unable to reallocate queue index %u",
+ (void *)dev, idx);
+ priv_unlock(priv);
+ return -ENOMEM;
+ }
+ }
} else {
rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) +
desc * sizeof(struct rte_mbuf *),
@@ -1295,6 +1249,9 @@ mlx5_rx_queue_release(void *dpdk_rxq)
rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
priv = rxq_ctrl->priv;
priv_lock(priv);
+ if (priv_flow_rxq_in_use(priv, rxq))
+ rte_panic("Rx queue %p is still used by a flow and cannot be"
+ " removed\n", (void *)rxq_ctrl);
for (i = 0; (i != priv->rxqs_n); ++i)
if ((*priv->rxqs)[i] == rxq) {
DEBUG("%p: removing RX queue %p from list",
@@ -1347,3 +1304,113 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts,
rxq = (*priv->rxqs)[index];
return priv->dev->rx_pkt_burst(rxq, pkts, pkts_n);
}
+
+/**
+ * Fill epoll fd list for rxq interrupts.
+ *
+ * @param priv
+ * Private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+priv_intr_efd_enable(struct priv *priv)
+{
+ unsigned int i;
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ if (n == 0)
+ return 0;
+ if (n < rxqs_n) {
+ WARN("number of Rx queues is larger than EAL max interrupt vector "
+ "%u > %u, unable to support rxq interrupts",
+ rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ return -EINVAL;
+ }
+ intr_handle->type = RTE_INTR_HANDLE_EXT;
+ for (i = 0; i != n; ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ struct rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct rxq_ctrl, rxq);
+ int fd = rxq_ctrl->channel->fd;
+ int flags;
+ int rc;
+
+ flags = fcntl(fd, F_GETFL);
+ rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ if (rc < 0) {
+ WARN("failed to change rxq interrupt file "
+ "descriptor %d for queue index %d", fd, i);
+ return -1;
+ }
+ intr_handle->efds[i] = fd;
+ }
+ intr_handle->nb_efd = n;
+ return 0;
+}
+
+/**
+ * Clean epoll fd list for rxq interrupts.
+ *
+ * @param priv
+ * Private structure.
+ */
+void
+priv_intr_efd_disable(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ rte_intr_free_epoll_fd(intr_handle);
+}
+
+/**
+ * Create and init interrupt vector array.
+ *
+ * @param priv
+ * Private structure.
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+priv_create_intr_vec(struct priv *priv)
+{
+ unsigned int rxqs_n = priv->rxqs_n;
+ unsigned int i;
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ if (rxqs_n == 0)
+ return 0;
+ intr_handle->intr_vec = (int *)
+ rte_malloc("intr_vec", rxqs_n * sizeof(int), 0);
+ if (intr_handle->intr_vec == NULL) {
+ WARN("Failed to allocate memory for intr_vec, "
+ "rxq interrupts will not be supported");
+ return -ENOMEM;
+ }
+ for (i = 0; i != rxqs_n; ++i) {
+ /* 1:1 mapping between rxq and interrupt. */
+ intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + i;
+ }
+ return 0;
+}
+
+/**
+ * Destroy the interrupt vector array created by priv_create_intr_vec().
+ *
+ * @param priv
+ * Private structure.
+ */
+void
+priv_destroy_intr_vec(struct priv *priv)
+{
+ struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+ rte_free(intr_handle->intr_vec);
+}
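/*
 * Illustrative sketch, not part of the patch: what the non-blocking
 * completion channel fds collected by priv_intr_efd_enable() are used for.
 * A real application would normally go through the EAL interrupt API; the
 * plain-epoll helper below only shows the mechanism. Names and the event
 * array size are hypothetical.
 */
#include <sys/epoll.h>
#include <unistd.h>

static int
example_wait_rx_intr(const int *efds, unsigned int nb_efd, int timeout_ms)
{
	struct epoll_event ev = { .events = EPOLLIN };
	struct epoll_event out[16];
	int epfd = epoll_create1(0);
	unsigned int i;
	int n;

	if (epfd < 0)
		return -1;
	for (i = 0; i < nb_efd; ++i) {
		ev.data.u32 = i; /* Remember which Rx queue this fd maps to. */
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, efds[i], &ev) < 0) {
			close(epfd);
			return -1;
		}
	}
	/* Number of Rx queues with a pending completion event, or -1. */
	n = epoll_wait(epfd, out, 16, timeout_ms);
	close(epfd);
	return n;
}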
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 3997b27a..de6e0fa4 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -74,6 +74,9 @@ check_cqe(volatile struct mlx5_cqe *cqe,
unsigned int cqes_n, const uint16_t ci)
__attribute__((always_inline));
+static inline void
+txq_complete(struct txq *txq) __attribute__((always_inline));
+
static inline uint32_t
txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
__attribute__((always_inline));
@@ -110,7 +113,7 @@ static inline int
check_cqe_seen(volatile struct mlx5_cqe *cqe)
{
static const uint8_t magic[] = "seen";
- volatile uint8_t (*buf)[sizeof(cqe->rsvd3)] = &cqe->rsvd3;
+ volatile uint8_t (*buf)[sizeof(cqe->rsvd0)] = &cqe->rsvd0;
int ret = 1;
unsigned int i;
@@ -173,8 +176,79 @@ check_cqe(volatile struct mlx5_cqe *cqe,
return 0;
}
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+/**
+ * Return the address of the WQE.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param ci
+ * WQE index.
+ *
+ * @return
+ * WQE address.
+ */
+static inline uintptr_t *
+tx_mlx5_wqe(struct txq *txq, uint16_t ci)
+{
+ ci &= ((1 << txq->wqe_n) - 1);
+ return (uintptr_t *)((uintptr_t)txq->wqes + ci * MLX5_WQE_SIZE);
+}
+
+/**
+ * Return the size of the WQ tailroom.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param addr
+ * Pointer to tail of WQ.
+ *
+ * @return
+ * Size of tailroom.
+ */
+static inline size_t
+tx_mlx5_wq_tailroom(struct txq *txq, void *addr)
+{
+ size_t tailroom;
+ tailroom = (uintptr_t)(txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE -
+ (uintptr_t)addr;
+ return tailroom;
+}
+
+/**
+ * Copy data to the tailroom of a circular queue.
+ *
+ * @param dst
+ * Pointer to destination.
+ * @param src
+ * Pointer to source.
+ * @param n
+ * Number of bytes to copy.
+ * @param base
+ * Pointer to head of queue.
+ * @param tailroom
+ * Size of tailroom from dst.
+ *
+ * @return
+ * Pointer after copied data.
+ */
+static inline void *
+mlx5_copy_to_wq(void *dst, const void *src, size_t n,
+ void *base, size_t tailroom)
+{
+ void *ret;
+
+ if (n > tailroom) {
+ rte_memcpy(dst, src, tailroom);
+ rte_memcpy(base, (void *)((uintptr_t)src + tailroom),
+ n - tailroom);
+ ret = (uint8_t *)base + n - tailroom;
+ } else {
+ rte_memcpy(dst, src, n);
+ ret = (n == tailroom) ? base : (uint8_t *)dst + n;
+ }
+ return ret;
+}
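/*
 * Illustrative sketch, not part of the patch: mlx5_copy_to_wq() treats the
 * WQ as a circular buffer. With a hypothetical 8-byte ring and 4 bytes of
 * tailroom left, copying 6 bytes writes 4 bytes at the tail, wraps the
 * last 2 bytes to the base and returns base + 2.
 */
static void
example_copy_to_wq(void)
{
	uint8_t ring[8] = { 0 };
	const uint8_t src[6] = { 1, 2, 3, 4, 5, 6 };
	void *next;

	next = mlx5_copy_to_wq(&ring[4], src, sizeof(src), ring, 4);
	/* ring is now { 5, 6, 0, 0, 1, 2, 3, 4 } and next == &ring[2]. */
	(void)next;
}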
/**
* Manage TX completions.
@@ -194,7 +268,7 @@ txq_complete(struct txq *txq)
uint16_t elts_tail;
uint16_t cq_ci = txq->cq_ci;
volatile struct mlx5_cqe *cqe = NULL;
- volatile struct mlx5_wqe *wqe;
+ volatile struct mlx5_wqe_ctrl *ctrl;
do {
volatile struct mlx5_cqe *tmp;
@@ -220,9 +294,10 @@ txq_complete(struct txq *txq)
} while (1);
if (unlikely(cqe == NULL))
return;
- wqe = &(*txq->wqes)[ntohs(cqe->wqe_counter) &
- ((1 << txq->wqe_n) - 1)].hdr;
- elts_tail = wqe->ctrl[3];
+ txq->wqe_pi = ntohs(cqe->wqe_counter);
+ ctrl = (volatile struct mlx5_wqe_ctrl *)
+ tx_mlx5_wqe(txq, txq->wqe_pi);
+ elts_tail = ctrl->ctrl3;
assert(elts_tail < (1 << txq->wqe_n));
/* Free buffers. */
while (elts_free != elts_tail) {
@@ -326,37 +401,79 @@ mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
}
/**
- * Prefetch a CQE.
+ * DPDK callback to check the status of a tx descriptor.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param cqe_ci
- * CQE consumer index.
+ * @param tx_queue
+ * The tx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
*/
-static inline void
-tx_prefetch_cqe(struct txq *txq, uint16_t ci)
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- volatile struct mlx5_cqe *cqe;
+ struct txq *txq = tx_queue;
+ const unsigned int elts_n = 1 << txq->elts_n;
+ const unsigned int elts_cnt = elts_n - 1;
+ unsigned int used;
- cqe = &(*txq->cqes)[ci & ((1 << txq->cqe_n) - 1)];
- rte_prefetch0(cqe);
+ txq_complete(txq);
+ used = (txq->elts_head - txq->elts_tail) & elts_cnt;
+ if (offset < used)
+ return RTE_ETH_TX_DESC_FULL;
+ return RTE_ETH_TX_DESC_DONE;
}
/**
- * Prefetch a WQE.
+ * DPDK callback to check the status of a rx descriptor.
*
- * @param txq
- * Pointer to TX queue structure.
- * @param wqe_ci
- * WQE consumer index.
+ * @param rx_queue
+ * The rx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the rx descriptor.
*/
-static inline void
-tx_prefetch_wqe(struct txq *txq, uint16_t ci)
+int
+mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
- volatile struct mlx5_wqe64 *wqe;
+ struct rxq *rxq = rx_queue;
+ struct rxq_zip *zip = &rxq->zip;
+ volatile struct mlx5_cqe *cqe;
+ const unsigned int cqe_n = (1 << rxq->cqe_n);
+ const unsigned int cqe_cnt = cqe_n - 1;
+ unsigned int cq_ci;
+ unsigned int used;
+
+ /* if we are processing a compressed cqe */
+ if (zip->ai) {
+ used = zip->cqe_cnt - zip->ca;
+ cq_ci = zip->cq_ci;
+ } else {
+ used = 0;
+ cq_ci = rxq->cq_ci;
+ }
+ cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+ while (check_cqe(cqe, cqe_n, cq_ci) == 0) {
+ int8_t op_own;
+ unsigned int n;
- wqe = &(*txq->wqes)[ci & ((1 << txq->wqe_n) - 1)];
- rte_prefetch0(wqe);
+ op_own = cqe->op_own;
+ if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
+ n = ntohl(cqe->byte_cnt);
+ else
+ n = 1;
+ cq_ci += n;
+ used += n;
+ cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
+ }
+ used = RTE_MIN(used, (1U << rxq->elts_n) - 1);
+ if (offset < used)
+ return RTE_ETH_RX_DESC_DONE;
+ return RTE_ETH_RX_DESC_AVAIL;
}
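/*
 * Illustrative sketch, not part of the patch: the two callbacks above are
 * reached through the generic ethdev descriptor status helpers. The
 * port/queue/offset values and the helper name are hypothetical; the
 * wrapper prototypes follow the rte_ethdev.h of the matching release.
 */
#include <rte_ethdev.h>

static int
example_rx_backlog_at_least(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	/* Ends up in mlx5_rx_descriptor_status() for mlx5 ports. */
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	/* DONE means at least `offset + 1` descriptors hold received packets. */
	return status == RTE_ETH_RX_DESC_DONE;
}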
/**
@@ -380,9 +497,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
const unsigned int elts_n = 1 << txq->elts_n;
unsigned int i = 0;
unsigned int j = 0;
+ unsigned int k = 0;
unsigned int max;
+ unsigned int max_inline = txq->max_inline;
+ const unsigned int inline_en = !!max_inline && txq->inline_en;
+ uint16_t max_wqe;
unsigned int comp;
- volatile struct mlx5_wqe *wqe = NULL;
+ volatile struct mlx5_wqe_v *wqe = NULL;
+ volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
struct rte_mbuf *buf = NULL;
uint8_t *raw;
@@ -390,25 +512,33 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!pkts_n))
return 0;
/* Prefetch first packet cacheline. */
- tx_prefetch_cqe(txq, txq->cq_ci);
- tx_prefetch_cqe(txq, txq->cq_ci + 1);
rte_prefetch0(*pkts);
/* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
do {
- volatile struct mlx5_wqe_data_seg *dseg = NULL;
+ volatile rte_v128u32_t *dseg = NULL;
uint32_t length;
unsigned int ds = 0;
+ unsigned int sg = 0; /* counter of additional segs attached. */
uintptr_t addr;
+ uint64_t naddr;
+ uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
+ uint16_t tso_header_sz = 0;
+ uint16_t ehdr;
+ uint8_t cs_flags = 0;
+ uint64_t tso = 0;
#ifdef MLX5_PMD_SOFT_COUNTERS
uint32_t total_length = 0;
#endif
/* first_seg */
- buf = *(pkts++);
+ buf = *pkts;
segs_n = buf->nb_segs;
/*
* Make sure there is enough room to store this packet and
@@ -419,104 +549,204 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
break;
max -= segs_n;
--segs_n;
- if (!segs_n)
- --pkts_n;
- wqe = &(*txq->wqes)[txq->wqe_ci &
- ((1 << txq->wqe_n) - 1)].hdr;
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
- if (pkts_n > 1)
- rte_prefetch0(*pkts);
+ if (unlikely(--max_wqe == 0))
+ break;
+ wqe = (volatile struct mlx5_wqe_v *)
+ tx_mlx5_wqe(txq, txq->wqe_ci);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
+ if (pkts_n - i > 1)
+ rte_prefetch0(*(pkts + 1));
addr = rte_pktmbuf_mtod(buf, uintptr_t);
length = DATA_LEN(buf);
+ ehdr = (((uint8_t *)addr)[1] << 8) |
+ ((uint8_t *)addr)[0];
#ifdef MLX5_PMD_SOFT_COUNTERS
total_length = length;
#endif
- assert(length >= MLX5_WQE_DWORD_SIZE);
+ if (length < (MLX5_WQE_DWORD_SIZE + 2))
+ break;
/* Update element. */
(*txq->elts)[elts_head] = buf;
- elts_head = (elts_head + 1) & (elts_n - 1);
/* Prefetch next buffer data. */
- if (pkts_n > 1) {
- volatile void *pkt_addr;
-
- pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
- rte_prefetch0(pkt_addr);
- }
+ if (pkts_n - i > 1)
+ rte_prefetch0(
+ rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
- wqe->eseg.cs_flags =
- MLX5_ETH_WQE_L3_CSUM |
- MLX5_ETH_WQE_L4_CSUM;
- } else {
- wqe->eseg.cs_flags = 0;
+ const uint64_t is_tunneled = buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ if (is_tunneled && txq->tunnel_en) {
+ cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+ MLX5_ETH_WQE_L4_INNER_CSUM;
+ if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+ cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+ } else {
+ cs_flags = MLX5_ETH_WQE_L3_CSUM |
+ MLX5_ETH_WQE_L4_CSUM;
+ }
}
- raw = (uint8_t *)(uintptr_t)&wqe->eseg.inline_hdr[0];
- /* Start the know and common part of the WQE structure. */
- wqe->ctrl[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
- wqe->ctrl[2] = 0;
- wqe->ctrl[3] = 0;
- wqe->eseg.rsvd0 = 0;
- wqe->eseg.rsvd1 = 0;
- wqe->eseg.mss = 0;
- wqe->eseg.rsvd2 = 0;
- /* Start by copying the Ethernet Header. */
- memcpy((uint8_t *)raw, ((uint8_t *)addr), 16);
- length -= MLX5_WQE_DWORD_SIZE;
- addr += MLX5_WQE_DWORD_SIZE;
+ raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
/* Replace the Ethernet type by the VLAN if necessary. */
if (buf->ol_flags & PKT_TX_VLAN_PKT) {
uint32_t vlan = htonl(0x81000000 | buf->vlan_tci);
-
- memcpy((uint8_t *)(raw + MLX5_WQE_DWORD_SIZE -
- sizeof(vlan)),
- &vlan, sizeof(vlan));
- addr -= sizeof(vlan);
- length += sizeof(vlan);
+ unsigned int len = 2 * ETHER_ADDR_LEN - 2;
+
+ addr += 2;
+ length -= 2;
+ /* Copy Destination and source mac address. */
+ memcpy((uint8_t *)raw, ((uint8_t *)addr), len);
+ /* Copy VLAN. */
+ memcpy((uint8_t *)raw + len, &vlan, sizeof(vlan));
+ /* Copy missing two bytes to end the DSeg. */
+ memcpy((uint8_t *)raw + len + sizeof(vlan),
+ ((uint8_t *)addr) + len, 2);
+ addr += len + 2;
+ length -= (len + 2);
+ } else {
+ memcpy((uint8_t *)raw, ((uint8_t *)addr) + 2,
+ MLX5_WQE_DWORD_SIZE);
+ length -= pkt_inline_sz;
+ addr += pkt_inline_sz;
+ }
+ if (txq->tso_en) {
+ tso = buf->ol_flags & PKT_TX_TCP_SEG;
+ if (tso) {
+ uintptr_t end = (uintptr_t)
+ (((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) *
+ MLX5_WQE_SIZE);
+ unsigned int copy_b;
+ uint8_t vlan_sz = (buf->ol_flags &
+ PKT_TX_VLAN_PKT) ? 4 : 0;
+ const uint64_t is_tunneled =
+ buf->ol_flags &
+ (PKT_TX_TUNNEL_GRE |
+ PKT_TX_TUNNEL_VXLAN);
+
+ tso_header_sz = buf->l2_len + vlan_sz +
+ buf->l3_len + buf->l4_len;
+
+ if (is_tunneled && txq->tunnel_en) {
+ tso_header_sz += buf->outer_l2_len +
+ buf->outer_l3_len;
+ cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ } else {
+ cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ }
+ if (unlikely(tso_header_sz >
+ MLX5_MAX_TSO_HEADER))
+ break;
+ copy_b = tso_header_sz - pkt_inline_sz;
+ /* First seg must contain all headers. */
+ assert(copy_b <= length);
+ raw += MLX5_WQE_DWORD_SIZE;
+ if (copy_b &&
+ ((end - (uintptr_t)raw) > copy_b)) {
+ uint16_t n = (MLX5_WQE_DS(copy_b) -
+ 1 + 3) / 4;
+
+ if (unlikely(max_wqe < n))
+ break;
+ max_wqe -= n;
+ rte_memcpy((void *)raw,
+ (void *)addr, copy_b);
+ addr += copy_b;
+ length -= copy_b;
+ pkt_inline_sz += copy_b;
+ /*
+ * Another DWORD will be added
+ * in the inline part.
+ */
+ raw += MLX5_WQE_DS(copy_b) *
+ MLX5_WQE_DWORD_SIZE -
+ MLX5_WQE_DWORD_SIZE;
+ } else {
+ /* NOP WQE. */
+ wqe->ctrl = (rte_v128u32_t){
+ htonl(txq->wqe_ci << 8),
+ htonl(txq->qp_num_8s | 1),
+ 0,
+ 0,
+ };
+ ds = 1;
+ total_length = 0;
+ k++;
+ goto next_wqe;
+ }
+ }
}
/* Inline if enough room. */
- if (txq->max_inline != 0) {
- uintptr_t end =
- (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n];
- uint16_t max_inline =
- txq->max_inline * RTE_CACHE_LINE_SIZE;
- uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE;
- uint16_t room;
+ if (inline_en || tso) {
+ uintptr_t end = (uintptr_t)
+ (((uintptr_t)txq->wqes) +
+ (1 << txq->wqe_n) * MLX5_WQE_SIZE);
+ unsigned int inline_room = max_inline *
+ RTE_CACHE_LINE_SIZE -
+ (pkt_inline_sz - 2);
+ uintptr_t addr_end = (addr + inline_room) &
+ ~(RTE_CACHE_LINE_SIZE - 1);
+ unsigned int copy_b = (addr_end > addr) ?
+ RTE_MIN((addr_end - addr), length) :
+ 0;
raw += MLX5_WQE_DWORD_SIZE;
- room = end - (uintptr_t)raw;
- if (room > max_inline) {
- uintptr_t addr_end = (addr + max_inline) &
- ~(RTE_CACHE_LINE_SIZE - 1);
- uint16_t copy_b = ((addr_end - addr) > length) ?
- length :
- (addr_end - addr);
-
+ if (copy_b && ((end - (uintptr_t)raw) > copy_b)) {
+ /*
+ * One Dseg remains in the current WQE. To
+ * keep the computation positive, it is
+ * removed after the bytes to Dseg conversion.
+ */
+ uint16_t n = (MLX5_WQE_DS(copy_b) - 1 + 3) / 4;
+
+ if (unlikely(max_wqe < n))
+ break;
+ max_wqe -= n;
+ if (tso) {
+ uint32_t inl =
+ htonl(copy_b | MLX5_INLINE_SEG);
+
+ pkt_inline_sz =
+ MLX5_WQE_DS(tso_header_sz) *
+ MLX5_WQE_DWORD_SIZE;
+ rte_memcpy((void *)raw,
+ (void *)&inl, sizeof(inl));
+ raw += sizeof(inl);
+ pkt_inline_sz += sizeof(inl);
+ }
rte_memcpy((void *)raw, (void *)addr, copy_b);
addr += copy_b;
length -= copy_b;
pkt_inline_sz += copy_b;
- /* Sanity check. */
- assert(addr <= addr_end);
}
- /* Store the inlined packet size in the WQE. */
- wqe->eseg.inline_hdr_sz = htons(pkt_inline_sz);
/*
- * 2 DWORDs consumed by the WQE header + 1 DSEG +
+ * 2 DWORDs consumed by the WQE header + ETH segment +
* the size of the inline part of the packet.
*/
ds = 2 + MLX5_WQE_DS(pkt_inline_sz - 2);
if (length > 0) {
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)wqe +
- (ds * MLX5_WQE_DWORD_SIZE));
- if ((uintptr_t)dseg >= end)
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)&(*txq->wqes)[0]);
+ if (ds % (MLX5_WQE_SIZE /
+ MLX5_WQE_DWORD_SIZE) == 0) {
+ if (unlikely(--max_wqe == 0))
+ break;
+ dseg = (volatile rte_v128u32_t *)
+ tx_mlx5_wqe(txq, txq->wqe_ci +
+ ds / 4);
+ } else {
+ dseg = (volatile rte_v128u32_t *)
+ ((uintptr_t)wqe +
+ (ds * MLX5_WQE_DWORD_SIZE));
+ }
goto use_dseg;
} else if (!segs_n) {
goto next_pkt;
} else {
+ /* dseg will be advanced as part of next_seg. */
+ dseg = (volatile rte_v128u32_t *)
+ ((uintptr_t)wqe +
+ ((ds - 1) * MLX5_WQE_DWORD_SIZE));
goto next_seg;
}
} else {
@@ -524,16 +754,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
* No inline has been done in the packet, only the
* Ethernet Header as been stored.
*/
- wqe->eseg.inline_hdr_sz = htons(MLX5_WQE_DWORD_SIZE);
- dseg = (struct mlx5_wqe_data_seg *)
+ dseg = (volatile rte_v128u32_t *)
((uintptr_t)wqe + (3 * MLX5_WQE_DWORD_SIZE));
ds = 3;
use_dseg:
/* Add the remaining packet as a simple ds. */
- *dseg = (struct mlx5_wqe_data_seg) {
- .addr = htonll(addr),
- .byte_count = htonl(length),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr = htonll(addr);
+ *dseg = (rte_v128u32_t){
+ htonl(length),
+ txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr,
+ naddr >> 32,
};
++ds;
if (!segs_n)
@@ -550,12 +781,12 @@ next_seg:
*/
assert(!(MLX5_WQE_SIZE % MLX5_WQE_DWORD_SIZE));
if (!(ds % (MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE))) {
- unsigned int n = (txq->wqe_ci + ((ds + 3) / 4)) &
- ((1 << txq->wqe_n) - 1);
-
- dseg = (struct mlx5_wqe_data_seg *)
- ((uintptr_t)&(*txq->wqes)[n]);
- tx_prefetch_wqe(txq, n + 1);
+ if (unlikely(--max_wqe == 0))
+ break;
+ dseg = (volatile rte_v128u32_t *)
+ tx_mlx5_wqe(txq, txq->wqe_ci + ds / 4);
+ rte_prefetch0(tx_mlx5_wqe(txq,
+ txq->wqe_ci + ds / 4 + 1));
} else {
++dseg;
}
@@ -567,38 +798,73 @@ next_seg:
total_length += length;
#endif
/* Store segment information. */
- *dseg = (struct mlx5_wqe_data_seg) {
- .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
- .byte_count = htonl(length),
- .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr = htonll(rte_pktmbuf_mtod(buf, uintptr_t));
+ *dseg = (rte_v128u32_t){
+ htonl(length),
+ txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr,
+ naddr >> 32,
};
- (*txq->elts)[elts_head] = buf;
elts_head = (elts_head + 1) & (elts_n - 1);
- ++j;
- --segs_n;
- if (segs_n)
+ (*txq->elts)[elts_head] = buf;
+ ++sg;
+ /* Advance counter only if all segs are successfully posted. */
+ if (sg < segs_n)
goto next_seg;
else
- --pkts_n;
+ j += sg;
next_pkt:
+ elts_head = (elts_head + 1) & (elts_n - 1);
+ ++pkts;
++i;
- wqe->ctrl[1] = htonl(txq->qp_num_8s | ds);
+ /* Initialize known and common part of the WQE structure. */
+ if (tso) {
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_TSO),
+ htonl(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ 0,
+ cs_flags | (htons(buf->tso_segsz) << 16),
+ 0,
+ (ehdr << 16) | htons(tso_header_sz),
+ };
+ } else {
+ wqe->ctrl = (rte_v128u32_t){
+ htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND),
+ htonl(txq->qp_num_8s | ds),
+ 0,
+ 0,
+ };
+ wqe->eseg = (rte_v128u32_t){
+ 0,
+ cs_flags,
+ 0,
+ (ehdr << 16) | htons(pkt_inline_sz),
+ };
+ }
+next_wqe:
txq->wqe_ci += (ds + 3) / 4;
+ /* Save the last successful WQE for completion request */
+ last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += total_length;
#endif
- } while (pkts_n);
+ } while (i < pkts_n);
/* Take a shortcut if nothing must be sent. */
- if (unlikely(i == 0))
+ if (unlikely((i + k) == 0))
return 0;
+ txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
/* Check whether completion threshold has been reached. */
- comp = txq->elts_comp + i + j;
+ comp = txq->elts_comp + i + j + k;
if (comp >= MLX5_TX_COMP_THRESH) {
/* Request completion on last WQE. */
- wqe->ctrl[2] = htonl(8);
+ last_wqe->ctrl2 = htonl(8);
/* Save elts_head in unused "immediate" field of WQE. */
- wqe->ctrl[3] = elts_head;
+ last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
} else {
txq->elts_comp = comp;
@@ -608,8 +874,7 @@ next_pkt:
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
- txq->elts_head = elts_head;
+ mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
return i;
}
@@ -629,13 +894,13 @@ mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] =
(volatile struct mlx5_wqe_data_seg (*)[])
- (uintptr_t)&(*txq->wqes)[(idx + 1) & ((1 << txq->wqe_n) - 1)];
+ tx_mlx5_wqe(txq, idx + 1);
mpw->state = MLX5_MPW_STATE_OPENED;
mpw->pkts_n = 0;
mpw->len = length;
mpw->total_len = 0;
- mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
mpw->wqe->eseg.mss = htons(length);
mpw->wqe->eseg.inline_hdr_sz = 0;
mpw->wqe->eseg.rsvd0 = 0;
@@ -677,8 +942,8 @@ mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw)
++txq->wqe_ci;
else
txq->wqe_ci += 2;
- tx_prefetch_wqe(txq, txq->wqe_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
}
/**
@@ -703,6 +968,7 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
+ uint16_t max_wqe;
unsigned int comp;
struct mlx5_mpw mpw = {
.state = MLX5_MPW_STATE_CLOSED,
@@ -711,14 +977,16 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
if (unlikely(!pkts_n))
return 0;
/* Prefetch first packet cacheline. */
- tx_prefetch_cqe(txq, txq->cq_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
if (max > elts_n)
max -= elts_n;
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
do {
struct rte_mbuf *buf = *(pkts++);
unsigned int elts_head_next;
@@ -752,6 +1020,14 @@ mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
(mpw.wqe->eseg.cs_flags != cs_flags)))
mlx5_mpw_close(txq, &mpw);
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
+ /*
+ * A Multi-Packet WQE consumes at most two WQEs.
+ * mlx5_mpw_new() expects to be able to use such
+ * resources.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
}
@@ -841,7 +1117,7 @@ mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length)
mpw->pkts_n = 0;
mpw->len = length;
mpw->total_len = 0;
- mpw->wqe = (volatile struct mlx5_wqe *)&(*txq->wqes)[idx].hdr;
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_MPW << 24) |
(txq->wqe_ci << 8) |
MLX5_OPCODE_TSO);
@@ -907,18 +1183,30 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
unsigned int i = 0;
unsigned int j = 0;
unsigned int max;
+ uint16_t max_wqe;
unsigned int comp;
unsigned int inline_room = txq->max_inline * RTE_CACHE_LINE_SIZE;
struct mlx5_mpw mpw = {
.state = MLX5_MPW_STATE_CLOSED,
};
+ /*
+ * Compute the maximum number of WQEs which can be consumed by the
+ * inline code.
+ * - 2 DSEGs for:
+ * - 1 control segment,
+ * - 1 Ethernet segment,
+ * - N DSEGs from the inline request.
+ */
+ const unsigned int wqe_inl_n =
+ ((2 * MLX5_WQE_DWORD_SIZE +
+ txq->max_inline * RTE_CACHE_LINE_SIZE) +
+ RTE_CACHE_LINE_SIZE - 1) / RTE_CACHE_LINE_SIZE;
if (unlikely(!pkts_n))
return 0;
/* Prefetch first packet cacheline. */
- tx_prefetch_cqe(txq, txq->cq_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci);
- tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci));
+ rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
/* Start processing. */
txq_complete(txq);
max = (elts_n - (elts_head - txq->elts_tail));
@@ -944,6 +1232,11 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
break;
max -= segs_n;
--pkts_n;
+ /*
+ * Compute max_wqe in case less WQE were consumed in previous
+ * iteration.
+ */
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
/* Should we enable HW CKSUM offload */
if (buf->ol_flags &
(PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
@@ -969,9 +1262,20 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
if (mpw.state == MLX5_MPW_STATE_CLOSED) {
if ((segs_n != 1) ||
(length > inline_room)) {
+ /*
+ * A Multi-Packet WQE consumes at most two WQEs.
+ * mlx5_mpw_new() expects to be able to use
+ * such resources.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ max_wqe -= 2;
mlx5_mpw_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
} else {
+ if (unlikely(max_wqe < wqe_inl_n))
+ break;
+ max_wqe -= wqe_inl_n;
mlx5_mpw_inline_new(txq, &mpw, length);
mpw.wqe->eseg.cs_flags = cs_flags;
}
@@ -1019,14 +1323,15 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
addr = rte_pktmbuf_mtod(buf, uintptr_t);
(*txq->elts)[elts_head] = buf;
/* Maximum number of bytes before wrapping. */
- max = ((uintptr_t)&(*txq->wqes)[1 << txq->wqe_n] -
+ max = ((((uintptr_t)(txq->wqes)) +
+ (1 << txq->wqe_n) *
+ MLX5_WQE_SIZE) -
(uintptr_t)mpw.data.raw);
if (length > max) {
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
(void *)addr,
max);
- mpw.data.raw =
- (volatile void *)&(*txq->wqes)[0];
+ mpw.data.raw = (volatile void *)txq->wqes;
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
(void *)(addr + max),
length - max);
@@ -1035,12 +1340,13 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
rte_memcpy((void *)(uintptr_t)mpw.data.raw,
(void *)addr,
length);
- mpw.data.raw += length;
+
+ if (length == max)
+ mpw.data.raw =
+ (volatile void *)txq->wqes;
+ else
+ mpw.data.raw += length;
}
- if ((uintptr_t)mpw.data.raw ==
- (uintptr_t)&(*txq->wqes)[1 << txq->wqe_n])
- mpw.data.raw =
- (volatile void *)&(*txq->wqes)[0];
++mpw.pkts_n;
mpw.total_len += length;
++j;
@@ -1091,6 +1397,360 @@ mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts,
}
/**
+ * Open an Enhanced MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ * @param padding
+ * Nonzero to pad the session start with a zero-length inline header.
+ */
+static inline void
+mlx5_empw_new(struct txq *txq, struct mlx5_mpw *mpw, int padding)
+{
+ uint16_t idx = txq->wqe_ci & ((1 << txq->wqe_n) - 1);
+
+ mpw->state = MLX5_MPW_ENHANCED_STATE_OPENED;
+ mpw->pkts_n = 0;
+ mpw->total_len = sizeof(struct mlx5_wqe);
+ mpw->wqe = (volatile struct mlx5_wqe *)tx_mlx5_wqe(txq, idx);
+ mpw->wqe->ctrl[0] = htonl((MLX5_OPC_MOD_ENHANCED_MPSW << 24) |
+ (txq->wqe_ci << 8) |
+ MLX5_OPCODE_ENHANCED_MPSW);
+ mpw->wqe->ctrl[2] = 0;
+ mpw->wqe->ctrl[3] = 0;
+ memset((void *)(uintptr_t)&mpw->wqe->eseg, 0, MLX5_WQE_DWORD_SIZE);
+ if (unlikely(padding)) {
+ uintptr_t addr = (uintptr_t)(mpw->wqe + 1);
+
+ /* Pad the first 2 DWORDs with zero-length inline header. */
+ *(volatile uint32_t *)addr = htonl(MLX5_INLINE_SEG);
+ *(volatile uint32_t *)(addr + MLX5_WQE_DWORD_SIZE) =
+ htonl(MLX5_INLINE_SEG);
+ mpw->total_len += 2 * MLX5_WQE_DWORD_SIZE;
+ /* Start from the next WQEBB. */
+ mpw->data.raw = (volatile void *)(tx_mlx5_wqe(txq, idx + 1));
+ } else {
+ mpw->data.raw = (volatile void *)(mpw->wqe + 1);
+ }
+}
+
+/**
+ * Close an Enhanced MPW session.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param mpw
+ * Pointer to MPW session structure.
+ *
+ * @return
+ * Number of consumed WQEs.
+ */
+static inline uint16_t
+mlx5_empw_close(struct txq *txq, struct mlx5_mpw *mpw)
+{
+ uint16_t ret;
+
+ /* Store size in multiples of 16 bytes. Control and Ethernet segments
+ * count as 2.
+ */
+ mpw->wqe->ctrl[1] = htonl(txq->qp_num_8s | MLX5_WQE_DS(mpw->total_len));
+ mpw->state = MLX5_MPW_STATE_CLOSED;
+ ret = (mpw->total_len + (MLX5_WQE_SIZE - 1)) / MLX5_WQE_SIZE;
+ txq->wqe_ci += ret;
+ return ret;
+}
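/*
 * Worked example, not part of the patch, assuming MLX5_WQE_DWORD_SIZE is
 * 16 (so MLX5_WQE_SIZE, defined above as 4 * MLX5_WQE_DWORD_SIZE, is 64):
 * a session whose total_len is 200 bytes is advertised as
 * MLX5_WQE_DS(200) = 13 units of 16 bytes in ctrl[1], and
 * mlx5_empw_close() returns (200 + 63) / 64 = 4 consumed WQEBBs.
 */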
+
+/**
+ * DPDK callback for TX with Enhanced MPW support.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+ struct txq *txq = (struct txq *)dpdk_txq;
+ uint16_t elts_head = txq->elts_head;
+ const unsigned int elts_n = 1 << txq->elts_n;
+ unsigned int i = 0;
+ unsigned int j = 0;
+ unsigned int max_elts;
+ uint16_t max_wqe;
+ unsigned int max_inline = txq->max_inline * RTE_CACHE_LINE_SIZE;
+ unsigned int mpw_room = 0;
+ unsigned int inl_pad = 0;
+ uint32_t inl_hdr;
+ struct mlx5_mpw mpw = {
+ .state = MLX5_MPW_STATE_CLOSED,
+ };
+
+ if (unlikely(!pkts_n))
+ return 0;
+ /* Start processing. */
+ txq_complete(txq);
+ max_elts = (elts_n - (elts_head - txq->elts_tail));
+ if (max_elts > elts_n)
+ max_elts -= elts_n;
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
+ max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
+ if (unlikely(!max_wqe))
+ return 0;
+ do {
+ struct rte_mbuf *buf = *(pkts++);
+ unsigned int elts_head_next;
+ uintptr_t addr;
+ uint64_t naddr;
+ unsigned int n;
+ unsigned int do_inline = 0; /* Whether inline is possible. */
+ uint32_t length;
+ unsigned int segs_n = buf->nb_segs;
+ uint32_t cs_flags = 0;
+
+ /*
+ * Make sure there is enough room to store this packet and
+ * that one ring entry remains unused.
+ */
+ assert(segs_n);
+ if (max_elts - j < segs_n + 1)
+ break;
+ /* Do not bother with large packets MPW cannot handle. */
+ if (segs_n > MLX5_MPW_DSEG_MAX)
+ break;
+ /* Should we enable HW CKSUM offload. */
+ if (buf->ol_flags &
+ (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM))
+ cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ /* Retrieve packet information. */
+ length = PKT_LEN(buf);
+ /* Start new session if:
+ * - multi-segment packet
+ * - no space left even for a dseg
+ * - next packet can be inlined with a new WQE
+ * - cs_flag differs
+ * The state can't be MLX5_MPW_STATE_OPENED here, as that state
+ * always holds a single multi-segmented packet.
+ */
+ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED) {
+ if ((segs_n != 1) ||
+ (inl_pad + sizeof(struct mlx5_wqe_data_seg) >
+ mpw_room) ||
+ (length <= txq->inline_max_packet_sz &&
+ inl_pad + sizeof(inl_hdr) + length >
+ mpw_room) ||
+ (mpw.wqe->eseg.cs_flags != cs_flags))
+ max_wqe -= mlx5_empw_close(txq, &mpw);
+ }
+ if (unlikely(mpw.state == MLX5_MPW_STATE_CLOSED)) {
+ if (unlikely(segs_n != 1)) {
+ /* Fall back to legacy MPW.
+ * A MPW session consumes 2 WQEs at most to
+ * include MLX5_MPW_DSEG_MAX pointers.
+ */
+ if (unlikely(max_wqe < 2))
+ break;
+ mlx5_mpw_new(txq, &mpw, length);
+ } else {
+ /* In Enhanced MPW, inline as much as the budget
+ * allows. The remaining space is filled with
+ * dsegs. If the title WQEBB isn't padded, it
+ * will hold 2 dsegs there.
+ */
+ mpw_room = RTE_MIN(MLX5_WQE_SIZE_MAX,
+ (max_inline ? max_inline :
+ pkts_n * MLX5_WQE_DWORD_SIZE) +
+ MLX5_WQE_SIZE);
+ if (unlikely(max_wqe * MLX5_WQE_SIZE <
+ mpw_room))
+ break;
+ /* Don't pad the title WQEBB to not waste WQ. */
+ mlx5_empw_new(txq, &mpw, 0);
+ mpw_room -= mpw.total_len;
+ inl_pad = 0;
+ do_inline =
+ length <= txq->inline_max_packet_sz &&
+ sizeof(inl_hdr) + length <= mpw_room &&
+ !txq->mpw_hdr_dseg;
+ }
+ mpw.wqe->eseg.cs_flags = cs_flags;
+ } else {
+ /* Evaluate whether the next packet can be inlined.
+ * Inlining is possible when:
+ * - length is less than the configured value
+ * - length fits into the remaining space
+ * - filling the title WQEBB with dsegs is not required
+ */
+ do_inline =
+ length <= txq->inline_max_packet_sz &&
+ inl_pad + sizeof(inl_hdr) + length <=
+ mpw_room &&
+ (!txq->mpw_hdr_dseg ||
+ mpw.total_len >= MLX5_WQE_SIZE);
+ }
+ /* Multi-segment packets must be alone in their MPW. */
+ assert((segs_n == 1) || (mpw.pkts_n == 0));
+ if (unlikely(mpw.state == MLX5_MPW_STATE_OPENED)) {
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length = 0;
+#endif
+ do {
+ volatile struct mlx5_wqe_data_seg *dseg;
+
+ elts_head_next =
+ (elts_head + 1) & (elts_n - 1);
+ assert(buf);
+ (*txq->elts)[elts_head] = buf;
+ dseg = mpw.data.dseg[mpw.pkts_n];
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ *dseg = (struct mlx5_wqe_data_seg){
+ .byte_count = htonl(DATA_LEN(buf)),
+ .lkey = txq_mp2mr(txq, txq_mb2mp(buf)),
+ .addr = htonll(addr),
+ };
+ elts_head = elts_head_next;
+#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
+ length += DATA_LEN(buf);
+#endif
+ buf = buf->next;
+ ++j;
+ ++mpw.pkts_n;
+ } while (--segs_n);
+ /* A multi-segmented packet takes one MPW session.
+ * TODO: Pack more multi-segmented packets if possible.
+ */
+ mlx5_mpw_close(txq, &mpw);
+ if (mpw.pkts_n < 3)
+ max_wqe--;
+ else
+ max_wqe -= 2;
+ } else if (do_inline) {
+ /* Inline packet into WQE. */
+ unsigned int max;
+
+ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
+ assert(length == DATA_LEN(buf));
+ inl_hdr = htonl(length | MLX5_INLINE_SEG);
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ mpw.data.raw = (volatile void *)
+ ((uintptr_t)mpw.data.raw + inl_pad);
+ max = tx_mlx5_wq_tailroom(txq,
+ (void *)(uintptr_t)mpw.data.raw);
+ /* Copy inline header. */
+ mpw.data.raw = (volatile void *)
+ mlx5_copy_to_wq(
+ (void *)(uintptr_t)mpw.data.raw,
+ &inl_hdr,
+ sizeof(inl_hdr),
+ (void *)(uintptr_t)txq->wqes,
+ max);
+ max = tx_mlx5_wq_tailroom(txq,
+ (void *)(uintptr_t)mpw.data.raw);
+ /* Copy packet data. */
+ mpw.data.raw = (volatile void *)
+ mlx5_copy_to_wq(
+ (void *)(uintptr_t)mpw.data.raw,
+ (void *)addr,
+ length,
+ (void *)(uintptr_t)txq->wqes,
+ max);
+ ++mpw.pkts_n;
+ mpw.total_len += (inl_pad + sizeof(inl_hdr) + length);
+ /* No need to get completion as the entire packet is
+ * copied to WQ. Free the buf right away.
+ */
+ elts_head_next = elts_head;
+ rte_pktmbuf_free_seg(buf);
+ mpw_room -= (inl_pad + sizeof(inl_hdr) + length);
+ /* Add pad in the next packet if any. */
+ inl_pad = (((uintptr_t)mpw.data.raw +
+ (MLX5_WQE_DWORD_SIZE - 1)) &
+ ~(MLX5_WQE_DWORD_SIZE - 1)) -
+ (uintptr_t)mpw.data.raw;
+ } else {
+ /* No inline. Load a dseg of packet pointer. */
+ volatile rte_v128u32_t *dseg;
+
+ assert(mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED);
+ assert((inl_pad + sizeof(*dseg)) <= mpw_room);
+ assert(length == DATA_LEN(buf));
+ if (!tx_mlx5_wq_tailroom(txq,
+ (void *)((uintptr_t)mpw.data.raw
+ + inl_pad)))
+ dseg = (volatile void *)txq->wqes;
+ else
+ dseg = (volatile void *)
+ ((uintptr_t)mpw.data.raw +
+ inl_pad);
+ elts_head_next = (elts_head + 1) & (elts_n - 1);
+ (*txq->elts)[elts_head] = buf;
+ addr = rte_pktmbuf_mtod(buf, uintptr_t);
+ for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++)
+ rte_prefetch2((void *)(addr +
+ n * RTE_CACHE_LINE_SIZE));
+ naddr = htonll(addr);
+ *dseg = (rte_v128u32_t) {
+ htonl(length),
+ txq_mp2mr(txq, txq_mb2mp(buf)),
+ naddr,
+ naddr >> 32,
+ };
+ mpw.data.raw = (volatile void *)(dseg + 1);
+ mpw.total_len += (inl_pad + sizeof(*dseg));
+ ++j;
+ ++mpw.pkts_n;
+ mpw_room -= (inl_pad + sizeof(*dseg));
+ inl_pad = 0;
+ }
+ elts_head = elts_head_next;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent bytes counter. */
+ txq->stats.obytes += length;
+#endif
+ ++i;
+ } while (i < pkts_n);
+ /* Take a shortcut if nothing must be sent. */
+ if (unlikely(i == 0))
+ return 0;
+ /* Check whether completion threshold has been reached. */
+ if (txq->elts_comp + j >= MLX5_TX_COMP_THRESH ||
+ (uint16_t)(txq->wqe_ci - txq->mpw_comp) >=
+ (1 << txq->wqe_n) / MLX5_TX_COMP_THRESH_INLINE_DIV) {
+ volatile struct mlx5_wqe *wqe = mpw.wqe;
+
+ /* Request completion on last WQE. */
+ wqe->ctrl[2] = htonl(8);
+ /* Save elts_head in unused "immediate" field of WQE. */
+ wqe->ctrl[3] = elts_head;
+ txq->elts_comp = 0;
+ txq->mpw_comp = txq->wqe_ci;
+ txq->cq_pi++;
+ } else {
+ txq->elts_comp += j;
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment sent packets counter. */
+ txq->stats.opackets += i;
+#endif
+ if (mpw.state == MLX5_MPW_ENHANCED_STATE_OPENED)
+ mlx5_empw_close(txq, &mpw);
+ else if (mpw.state == MLX5_MPW_STATE_OPENED)
+ mlx5_mpw_close(txq, &mpw);
+ /* Ring QP doorbell. */
+ mlx5_tx_dbrec(txq, mpw.wqe);
+ txq->elts_head = elts_head;
+ return i;
+}
+
+/**
* Translate RX completion flags to packet type.
*
* @param[in] cqe
@@ -1153,6 +1813,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
struct rxq_zip *zip = &rxq->zip;
uint16_t cqe_n = cqe_cnt + 1;
int len = 0;
+ uint16_t idx, end;
/* Process compressed data in the CQE and mini arrays. */
if (zip->ai) {
@@ -1163,6 +1824,14 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
len = ntohl((*mc)[zip->ai & 7].byte_cnt);
*rss_hash = ntohl((*mc)[zip->ai & 7].rx_hash_result);
if ((++zip->ai & 7) == 0) {
+ /* Invalidate consumed CQEs */
+ idx = zip->ca;
+ end = zip->na;
+ while (idx != end) {
+ (*rxq->cqes)[idx & cqe_cnt].op_own =
+ MLX5_CQE_INVALIDATE;
+ ++idx;
+ }
/*
* Increment consumer index to skip the number of
* CQEs consumed. Hardware leaves holes in the CQ
@@ -1172,8 +1841,9 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
zip->na += 8;
}
if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) {
- uint16_t idx = rxq->cq_ci;
- uint16_t end = zip->cq_ci;
+ /* Invalidate the rest */
+ idx = zip->ca;
+ end = zip->cq_ci;
while (idx != end) {
(*rxq->cqes)[idx & cqe_cnt].op_own =
@@ -1209,7 +1879,7 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
* special case the second one is located 7 CQEs after
* the initial CQE instead of 8 for subsequent ones.
*/
- zip->ca = rxq->cq_ci & cqe_cnt;
+ zip->ca = rxq->cq_ci;
zip->na = zip->ca + 7;
/* Compute the next non compressed CQE. */
--rxq->cq_ci;
@@ -1218,6 +1888,13 @@ mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
len = ntohl((*mc)[0].byte_cnt);
*rss_hash = ntohl((*mc)[0].rx_hash_result);
zip->ai = 1;
+ /* Prefetch all the entries to be invalidated */
+ idx = zip->ca;
+ end = zip->cq_ci;
+ while (idx != end) {
+ rte_prefetch0(&(*rxq->cqes)[(idx) & cqe_cnt]);
+ ++idx;
+ }
} else {
len = ntohl(cqe->byte_cnt);
*rss_hash = ntohl(cqe->rx_hash_res);
@@ -1290,7 +1967,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
&(*rxq->cqes)[rxq->cq_ci & cqe_cnt];
unsigned int i = 0;
unsigned int rq_ci = rxq->rq_ci << sges_n;
- int len; /* keep its value across iterations. */
+ int len = 0; /* keep its value across iterations. */
while (pkts_n) {
unsigned int idx = rq_ci & wqe_cnt;
@@ -1317,8 +1994,9 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
while (pkt != seg) {
assert(pkt != (*rxq->elts)[idx]);
rep = NEXT(pkt);
- rte_mbuf_refcnt_set(pkt, 0);
- __rte_mbuf_raw_free(pkt);
+ NEXT(pkt) = NULL;
+ NB_SEGS(pkt) = 1;
+ rte_mbuf_raw_free(pkt);
pkt = rep;
}
break;
@@ -1328,14 +2006,12 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt,
&rss_hash_res);
if (!len) {
- rte_mbuf_refcnt_set(rep, 0);
- __rte_mbuf_raw_free(rep);
+ rte_mbuf_raw_free(rep);
break;
}
if (unlikely(len == -1)) {
/* RX error, packet is likely too large. */
- rte_mbuf_refcnt_set(rep, 0);
- __rte_mbuf_raw_free(rep);
+ rte_mbuf_raw_free(rep);
++rxq->stats.idropped;
goto skip;
}
@@ -1348,23 +2024,31 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
pkt->hash.rss = rss_hash_res;
pkt->ol_flags = PKT_RX_RSS_HASH;
}
- if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip |
- rxq->crc_present) {
- if (rxq->csum) {
- pkt->packet_type =
- rxq_cq_to_pkt_type(cqe);
- pkt->ol_flags |=
- rxq_cq_to_ol_flags(rxq, cqe);
- }
- if (cqe->hdr_type_etc &
- MLX5_CQE_VLAN_STRIPPED) {
- pkt->ol_flags |= PKT_RX_VLAN_PKT |
- PKT_RX_VLAN_STRIPPED;
- pkt->vlan_tci = ntohs(cqe->vlan_info);
+ if (rxq->mark &&
+ MLX5_FLOW_MARK_IS_VALID(cqe->sop_drop_qpn)) {
+ pkt->ol_flags |= PKT_RX_FDIR;
+ if (cqe->sop_drop_qpn !=
+ htonl(MLX5_FLOW_MARK_DEFAULT)) {
+ uint32_t mark = cqe->sop_drop_qpn;
+
+ pkt->ol_flags |= PKT_RX_FDIR_ID;
+ pkt->hash.fdir.hi =
+ mlx5_flow_mark_get(mark);
}
- if (rxq->crc_present)
- len -= ETHER_CRC_LEN;
}
+ if (rxq->csum | rxq->csum_l2tun) {
+ pkt->packet_type = rxq_cq_to_pkt_type(cqe);
+ pkt->ol_flags |= rxq_cq_to_ol_flags(rxq, cqe);
+ }
+ if (rxq->vlan_strip &&
+ (cqe->hdr_type_etc &
+ htons(MLX5_CQE_VLAN_STRIPPED))) {
+ pkt->ol_flags |= PKT_RX_VLAN_PKT |
+ PKT_RX_VLAN_STRIPPED;
+ pkt->vlan_tci = ntohs(cqe->vlan_info);
+ }
+ if (rxq->crc_present)
+ len -= ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
}
DATA_LEN(rep) = DATA_LEN(seg);
@@ -1466,3 +2150,76 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
(void)pkts_n;
return 0;
}
+
+/**
+ * DPDK callback for rx queue interrupt enable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * RX queue number
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#ifdef HAVE_UPDATE_CQ_CI
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *cq = rxq_ctrl->cq;
+ uint16_t ci = rxq->cq_ci;
+ int ret = 0;
+
+ ibv_mlx5_exp_update_cq_ci(cq, ci);
+ ret = ibv_req_notify_cq(cq, 0);
+#else
+ int ret = -1;
+ (void)dev;
+ (void)rx_queue_id;
+#endif
+ if (ret)
+ WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
+ return ret;
+}
+
+/**
+ * DPDK callback for rx queue interrupt disable.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param rx_queue_id
+ * RX queue number
+ *
+ * @return
+ * 0 on success, negative on failure.
+ */
+int
+mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#ifdef HAVE_UPDATE_CQ_CI
+ struct priv *priv = mlx5_get_priv(dev);
+ struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
+ struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
+ struct ibv_cq *cq = rxq_ctrl->cq;
+ struct ibv_cq *ev_cq;
+ void *ev_ctx;
+ int ret = 0;
+
+ ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
+ if (ret || ev_cq != cq)
+ ret = -1;
+ else
+ ibv_ack_cq_events(cq, 1);
+#else
+ int ret = -1;
+ (void)dev;
+ (void)rx_queue_id;
+#endif
+ if (ret)
+ WARN("unable to disable interrupt on rx queue %d",
+ rx_queue_id);
+ return ret;
+}
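
From the application side these two callbacks are reached through the generic ethdev RX-interrupt API once dev_conf.intr_conf.rxq is set at configure time. Below is a minimal sketch of the usual arm/wait/disarm cycle, assuming the l3fwd-power-style per-thread epoll set; the port and queue numbers are illustrative, the fragment is meant to run on an lcore after the port has been started, and exact header names and port-id width vary between DPDK releases.

#include <rte_ethdev.h>
#include <rte_interrupts.h>

#define PORT_ID  0	/* illustrative values, not part of the driver */
#define QUEUE_ID 0

static int wait_for_rx(void)
{
	struct rte_epoll_event event;
	int n;

	/* Register the queue's interrupt in the per-thread epoll set. */
	if (rte_eth_dev_rx_intr_ctl_q(PORT_ID, QUEUE_ID,
				      RTE_EPOLL_PER_THREAD,
				      RTE_INTR_EVENT_ADD, NULL) < 0)
		return -1;
	/* Arm the interrupt; this ends up in mlx5_rx_intr_enable(). */
	if (rte_eth_dev_rx_intr_enable(PORT_ID, QUEUE_ID) < 0)
		return -1;
	/* Sleep until traffic arrives or 100 ms elapse. */
	n = rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, 100);
	/* Ack and disarm before going back to polling the queue. */
	rte_eth_dev_rx_intr_disable(PORT_ID, QUEUE_ID);
	return n;
}
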
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 909d80e6..8db8eb14 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -114,7 +114,8 @@ struct rxq {
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int port_id:8;
unsigned int rss_hash:1; /* RSS hash result is enabled. */
- unsigned int :9; /* Remaining bits. */
+ unsigned int mark:1; /* Marked flow available on the queue. */
+ unsigned int :8; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t rq_ci;
@@ -132,11 +133,9 @@ struct rxq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_exp_wq *wq; /* Work Queue. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
struct fdir_queue *fdir_queue; /* Flow director queue. */
struct ibv_mr *mr; /* Memory Region (for mp). */
- struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
- struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
+ struct ibv_comp_channel *channel;
unsigned int socket; /* CPU socket ID for allocations. */
struct rxq rxq; /* Data path structure. */
};
@@ -246,15 +245,24 @@ struct txq {
uint16_t elts_head; /* Current index in (*elts)[]. */
uint16_t elts_tail; /* First element awaiting completion. */
uint16_t elts_comp; /* Counter since last completion request. */
+ uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+ uint16_t cq_pi; /* Producer index for completion queue. */
uint16_t wqe_ci; /* Consumer index for work queue. */
+ uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
uint16_t cqe_n:4; /* Number of CQ elements (in log2). */
uint16_t wqe_n:4; /* Number of WQ elements (in log2). */
+ uint16_t inline_en:1; /* When set, inline is enabled. */
+ uint16_t tso_en:1; /* When set, hardware TSO is enabled. */
+ uint16_t tunnel_en:1;
+ /* When set, TX offload for tunneled packets is supported. */
+ uint16_t mpw_hdr_dseg:1; /* Enable DSEGs in the title WQEBB. */
uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
+ uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
uint32_t qp_num_8s; /* QP number shifted by 8. */
volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
- volatile struct mlx5_wqe64 (*wqes)[]; /* Work queue. */
+ volatile void *wqes; /* Work queue (use volatile to write into). */
volatile uint32_t *qp_db; /* Work queue doorbell. */
volatile uint32_t *cq_db; /* Completion queue doorbell. */
volatile void *bf_reg; /* Blueflame register. */
@@ -272,9 +280,6 @@ struct txq_ctrl {
struct priv *priv; /* Back pointer to private data. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
- struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
- struct ibv_exp_res_domain *rd; /* Resource Domain. */
unsigned int socket; /* CPU socket ID for allocations. */
struct txq txq; /* Data path structure. */
};
@@ -293,6 +298,10 @@ int priv_create_hash_rxqs(struct priv *);
void priv_destroy_hash_rxqs(struct priv *);
int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
int priv_rehash_flows(struct priv *);
+int priv_intr_efd_enable(struct priv *priv);
+void priv_intr_efd_disable(struct priv *priv);
+int priv_create_intr_vec(struct priv *priv);
+void priv_destroy_intr_vec(struct priv *priv);
void rxq_cleanup(struct rxq_ctrl *);
int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *);
int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t,
@@ -318,9 +327,14 @@ uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t);
+uint16_t mlx5_tx_burst_empw(void *, struct rte_mbuf **, uint16_t);
uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t);
uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t);
+int mlx5_rx_descriptor_status(void *, uint16_t);
+int mlx5_tx_descriptor_status(void *, uint16_t);
+int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id);
/* mlx5_mr.c */
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index f2b5781a..703f48c3 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -31,11 +31,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/sockios.h>
+#include <linux/ethtool.h>
+
/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_ethdev.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
@@ -44,6 +49,274 @@
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"
+struct mlx5_counter_ctrl {
+ /* Name of the counter. */
+ char dpdk_name[RTE_ETH_XSTATS_NAME_SIZE];
+ /* Name of the counter on the device table. */
+ char ctr_name[RTE_ETH_XSTATS_NAME_SIZE];
+};
+
+static const struct mlx5_counter_ctrl mlx5_counters_init[] = {
+ {
+ .dpdk_name = "rx_port_unicast_bytes",
+ .ctr_name = "rx_vport_unicast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_multicast_bytes",
+ .ctr_name = "rx_vport_multicast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_broadcast_bytes",
+ .ctr_name = "rx_vport_broadcast_bytes",
+ },
+ {
+ .dpdk_name = "rx_port_unicast_packets",
+ .ctr_name = "rx_vport_unicast_packets",
+ },
+ {
+ .dpdk_name = "rx_port_multicast_packets",
+ .ctr_name = "rx_vport_multicast_packets",
+ },
+ {
+ .dpdk_name = "rx_port_broadcast_packets",
+ .ctr_name = "rx_vport_broadcast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_unicast_bytes",
+ .ctr_name = "tx_vport_unicast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_multicast_bytes",
+ .ctr_name = "tx_vport_multicast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_broadcast_bytes",
+ .ctr_name = "tx_vport_broadcast_bytes",
+ },
+ {
+ .dpdk_name = "tx_port_unicast_packets",
+ .ctr_name = "tx_vport_unicast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_multicast_packets",
+ .ctr_name = "tx_vport_multicast_packets",
+ },
+ {
+ .dpdk_name = "tx_port_broadcast_packets",
+ .ctr_name = "tx_vport_broadcast_packets",
+ },
+ {
+ .dpdk_name = "rx_wqe_err",
+ .ctr_name = "rx_wqe_err",
+ },
+ {
+ .dpdk_name = "rx_crc_errors_phy",
+ .ctr_name = "rx_crc_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_in_range_len_errors_phy",
+ .ctr_name = "rx_in_range_len_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_symbol_err_phy",
+ .ctr_name = "rx_symbol_err_phy",
+ },
+ {
+ .dpdk_name = "tx_errors_phy",
+ .ctr_name = "tx_errors_phy",
+ },
+ {
+ .dpdk_name = "rx_out_of_buffer",
+ .ctr_name = "out_of_buffer",
+ },
+};
+
+static const unsigned int xstats_n = RTE_DIM(mlx5_counters_init);
+
+/**
+ * Read device counters table.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] stats
+ * Counters table output buffer.
+ *
+ * @return
+ * 0 on success and stats is filled, negative on error.
+ */
+static int
+priv_read_dev_counters(struct priv *priv, uint64_t *stats)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ struct ifreq ifr;
+ unsigned int stats_sz = (xstats_ctrl->stats_n * sizeof(uint64_t)) +
+ sizeof(struct ethtool_stats);
+ struct ethtool_stats et_stats[(stats_sz + (
+ sizeof(struct ethtool_stats) - 1)) /
+ sizeof(struct ethtool_stats)];
+
+ et_stats->cmd = ETHTOOL_GSTATS;
+ et_stats->n_stats = xstats_ctrl->stats_n;
+ ifr.ifr_data = (caddr_t)et_stats;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to read statistic values from device");
+ return -1;
+ }
+ for (i = 0; i != xstats_n; ++i) {
+ if (priv_is_ib_cntr(mlx5_counters_init[i].ctr_name))
+ priv_get_cntr_sysfs(priv,
+ mlx5_counters_init[i].ctr_name,
+ &stats[i]);
+ else
+ stats[i] = (uint64_t)
+ et_stats->data[xstats_ctrl->dev_table_idx[i]];
+ }
+ return 0;
+}
+
+/**
+ * Query the number of statistics provided by ETHTOOL.
+ *
+ * @param priv
+ * Pointer to private structure.
+ *
+ * @return
+ * Number of statistics on success, -1 on error.
+ */
+static int
+priv_ethtool_get_stats_n(struct priv *priv) {
+ struct ethtool_drvinfo drvinfo;
+ struct ifreq ifr;
+
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr.ifr_data = (caddr_t)&drvinfo;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to query number of statistics");
+ return -1;
+ }
+ return drvinfo.n_stats;
+}
+
+/**
+ * Init the structures to read device counters.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+void
+priv_xstats_init(struct priv *priv)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int j;
+ struct ifreq ifr;
+ struct ethtool_gstrings *strings = NULL;
+ unsigned int dev_stats_n;
+ unsigned int str_sz;
+
+ dev_stats_n = priv_ethtool_get_stats_n(priv);
+ if (dev_stats_n < 1) {
+ WARN("no extended statistics available");
+ return;
+ }
+ xstats_ctrl->stats_n = dev_stats_n;
+ /* Allocate memory to grab stat names and values. */
+ str_sz = dev_stats_n * ETH_GSTRING_LEN;
+ strings = (struct ethtool_gstrings *)
+ rte_malloc("xstats_strings",
+ str_sz + sizeof(struct ethtool_gstrings), 0);
+ if (!strings) {
+ WARN("unable to allocate memory for xstats");
+ return;
+ }
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = dev_stats_n;
+ ifr.ifr_data = (caddr_t)strings;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic names");
+ goto free;
+ }
+ for (j = 0; j != xstats_n; ++j)
+ xstats_ctrl->dev_table_idx[j] = dev_stats_n;
+ for (i = 0; i != dev_stats_n; ++i) {
+ const char *curr_string = (const char *)
+ &strings->data[i * ETH_GSTRING_LEN];
+
+ for (j = 0; j != xstats_n; ++j) {
+ if (!strcmp(mlx5_counters_init[j].ctr_name,
+ curr_string)) {
+ xstats_ctrl->dev_table_idx[j] = i;
+ break;
+ }
+ }
+ }
+ for (j = 0; j != xstats_n; ++j) {
+ if (priv_is_ib_cntr(mlx5_counters_init[j].ctr_name))
+ continue;
+ if (xstats_ctrl->dev_table_idx[j] >= dev_stats_n) {
+ WARN("counter \"%s\" is not recognized",
+ mlx5_counters_init[j].dpdk_name);
+ goto free;
+ }
+ }
+ /* Copy the counters to the base values on the first read. */
+ assert(xstats_n <= MLX5_MAX_XSTATS);
+ priv_read_dev_counters(priv, xstats_ctrl->base);
+free:
+ rte_free(strings);
+}
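
The three requests issued above through priv_ifreq() are the standard SIOCETHTOOL sequence: ETHTOOL_GDRVINFO for the counter count, ETHTOOL_GSTRINGS for the counter names and ETHTOOL_GSTATS for the values. The following self-contained Linux sketch shows that sequence independently of the PMD; the interface name is an assumption for illustration only.

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char *ifname = "eth0"; /* assumed interface name */
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct ifreq ifr;
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_gstrings *strings;
	struct ethtool_stats *stats;
	unsigned int i, n;

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	/* 1. How many statistics does the device expose? */
	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	n = drvinfo.n_stats;
	/* 2. Fetch the statistic names. */
	strings = calloc(1, sizeof(*strings) + n * ETH_GSTRING_LEN);
	strings->cmd = ETHTOOL_GSTRINGS;
	strings->string_set = ETH_SS_STATS;
	strings->len = n;
	ifr.ifr_data = (char *)strings;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	/* 3. Fetch the values and print name/value pairs. */
	stats = calloc(1, sizeof(*stats) + n * sizeof(uint64_t));
	stats->cmd = ETHTOOL_GSTATS;
	stats->n_stats = n;
	ifr.ifr_data = (char *)stats;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		return 1;
	for (i = 0; i < n; i++)
		printf("%.32s: %llu\n",
		       (char *)&strings->data[i * ETH_GSTRING_LEN],
		       (unsigned long long)stats->data[i]);
	close(fd);
	free(strings);
	free(stats);
	return 0;
}
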
+
+/**
+ * Get device extended statistics.
+ *
+ * @param priv
+ * Pointer to private structure.
+ * @param[out] stats
+ * Pointer to rte extended stats table.
+ *
+ * @return
+ * Number of extended stats on success and stats is filled,
+ * negative on error.
+ */
+static int
+priv_xstats_get(struct priv *priv, struct rte_eth_xstat *stats)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+
+ if (priv_read_dev_counters(priv, counters) < 0)
+ return -1;
+ for (i = 0; i != xstats_n; ++i) {
+ stats[i].id = i;
+ stats[i].value = (counters[i] - xstats_ctrl->base[i]);
+ }
+ return n;
+}
+
+/**
+ * Reset device extended statistics.
+ *
+ * @param priv
+ * Pointer to private structure.
+ */
+static void
+priv_xstats_reset(struct priv *priv)
+{
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ unsigned int i;
+ unsigned int n = xstats_n;
+ uint64_t counters[n];
+
+ if (priv_read_dev_counters(priv, counters) < 0)
+ return;
+ for (i = 0; i != n; ++i)
+ xstats_ctrl->base[i] = counters[i];
+}
+
/**
* DPDK callback to get device statistics.
*
@@ -142,3 +415,95 @@ mlx5_stats_reset(struct rte_eth_dev *dev)
#endif
priv_unlock(priv);
}
+
+/**
+ * DPDK callback to get extended device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] stats
+ * Stats table output buffer.
+ * @param n
+ * The size of the stats table.
+ *
+ * @return
+ * Number of xstats on success, negative on failure.
+ */
+int
+mlx5_xstats_get(struct rte_eth_dev *dev,
+ struct rte_eth_xstat *stats, unsigned int n)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ int ret = xstats_n;
+
+ if (n >= xstats_n && stats) {
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+
+ priv_lock(priv);
+ stats_n = priv_ethtool_get_stats_n(priv);
+ if (stats_n < 0)
+ return -1;
+ if (xstats_ctrl->stats_n != stats_n)
+ priv_xstats_init(priv);
+ ret = priv_xstats_get(priv, stats);
+ priv_unlock(priv);
+ }
+ return ret;
+}
+
+/**
+ * DPDK callback to clear device extended statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ */
+void
+mlx5_xstats_reset(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl;
+ int stats_n;
+
+ priv_lock(priv);
+ stats_n = priv_ethtool_get_stats_n(priv);
+ if (stats_n < 0)
+ return;
+ if (xstats_ctrl->stats_n != stats_n)
+ priv_xstats_init(priv);
+ priv_xstats_reset(priv);
+ priv_unlock(priv);
+}
+
+/**
+ * DPDK callback to retrieve names of extended device statistics.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param[out] xstats_names
+ * Buffer to insert names into.
+ * @param n
+ * Number of names.
+ *
+ * @return
+ * Number of xstats names.
+ */
+int
+mlx5_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned int n)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ unsigned int i;
+
+ if (n >= xstats_n && xstats_names) {
+ priv_lock(priv);
+ for (i = 0; i != xstats_n; ++i) {
+ strncpy(xstats_names[i].name,
+ mlx5_counters_init[i].dpdk_name,
+ RTE_ETH_XSTATS_NAME_SIZE);
+ xstats_names[i].name[RTE_ETH_XSTATS_NAME_SIZE - 1] = 0;
+ }
+ priv_unlock(priv);
+ }
+ return xstats_n;
+}
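
An application consumes these counters through the generic ethdev xstats API rather than calling the PMD directly. A minimal sketch of reading them back is shown below, assuming port 0 and the rte_eth_xstats_get()/rte_eth_xstats_get_names() calls available in this DPDK release; the port number and program structure are illustrative.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

static void
dump_xstats(uint8_t port_id)
{
	/* A NULL table asks for the required number of entries. */
	int n = rte_eth_xstats_get_names(port_id, NULL, 0);
	struct rte_eth_xstat_name *names;
	struct rte_eth_xstat *values;
	int i, ret;

	if (n <= 0)
		return;
	names = calloc(n, sizeof(*names));
	values = calloc(n, sizeof(*values));
	if (names == NULL || values == NULL)
		goto out;
	if (rte_eth_xstats_get_names(port_id, names, n) != n)
		goto out;
	ret = rte_eth_xstats_get(port_id, values, n);
	for (i = 0; i < ret; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i].value);
out:
	free(names);
	free(values);
}

int
main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return 1;
	dump_xstats(0); /* port 0 is an assumption for illustration */
	return 0;
}
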
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index d4dccd88..8c5aa691 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -82,14 +82,33 @@ mlx5_dev_start(struct rte_eth_dev *dev)
ERROR("%p: an error occurred while configuring hash RX queues:"
" %s",
(void *)priv, strerror(err));
- /* Rollback. */
- priv_special_flow_disable_all(priv);
- priv_mac_addrs_disable(priv);
- priv_destroy_hash_rxqs(priv);
+ goto error;
}
if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE)
priv_fdir_enable(priv);
+ err = priv_flow_start(priv);
+ if (err) {
+ priv->started = 0;
+ ERROR("%p: an error occurred while configuring flows:"
+ " %s",
+ (void *)priv, strerror(err));
+ goto error;
+ }
priv_dev_interrupt_handler_install(priv, dev);
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ err = priv_intr_efd_enable(priv);
+ if (!err)
+ err = priv_create_intr_vec(priv);
+ }
+ priv_xstats_init(priv);
+ priv_unlock(priv);
+ return 0;
+error:
+ /* Rollback. */
+ priv_special_flow_disable_all(priv);
+ priv_mac_addrs_disable(priv);
+ priv_destroy_hash_rxqs(priv);
+ priv_flow_stop(priv);
priv_unlock(priv);
return -err;
}
@@ -120,7 +139,12 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
priv_fdir_disable(priv);
+ priv_flow_stop(priv);
priv_dev_interrupt_handler_uninstall(priv, dev);
+ if (priv->dev->data->dev_conf.intr_conf.rxq) {
+ priv_destroy_intr_vec(priv);
+ priv_intr_efd_disable(priv);
+ }
priv->started = 0;
priv_unlock(priv);
}
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 439908fc..de7e28be 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -82,7 +82,9 @@ txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
for (i = 0; (i != elts_n); ++i)
(*txq_ctrl->txq.elts)[i] = NULL;
for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
- volatile struct mlx5_wqe64 *wqe = &(*txq_ctrl->txq.wqes)[i];
+ volatile struct mlx5_wqe64 *wqe =
+ (volatile struct mlx5_wqe64 *)
+ txq_ctrl->txq.wqes + i;
memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
}
@@ -138,48 +140,14 @@ txq_free_elts(struct txq_ctrl *txq_ctrl)
void
txq_cleanup(struct txq_ctrl *txq_ctrl)
{
- struct ibv_exp_release_intf_params params;
size_t i;
DEBUG("cleaning up %p", (void *)txq_ctrl);
txq_free_elts(txq_ctrl);
- if (txq_ctrl->if_qp != NULL) {
- assert(txq_ctrl->priv != NULL);
- assert(txq_ctrl->priv->ctx != NULL);
- assert(txq_ctrl->qp != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
- txq_ctrl->if_qp,
- &params));
- }
- if (txq_ctrl->if_cq != NULL) {
- assert(txq_ctrl->priv != NULL);
- assert(txq_ctrl->priv->ctx != NULL);
- assert(txq_ctrl->cq != NULL);
- params = (struct ibv_exp_release_intf_params){
- .comp_mask = 0,
- };
- claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
- txq_ctrl->if_cq,
- &params));
- }
if (txq_ctrl->qp != NULL)
claim_zero(ibv_destroy_qp(txq_ctrl->qp));
if (txq_ctrl->cq != NULL)
claim_zero(ibv_destroy_cq(txq_ctrl->cq));
- if (txq_ctrl->rd != NULL) {
- struct ibv_exp_destroy_res_domain_attr attr = {
- .comp_mask = 0,
- };
-
- assert(txq_ctrl->priv != NULL);
- assert(txq_ctrl->priv->ctx != NULL);
- claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
- txq_ctrl->rd,
- &attr));
- }
for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
if (txq_ctrl->txq.mp2mr[i].mp == NULL)
break;
@@ -214,9 +182,7 @@ txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
}
tmpl->txq.cqe_n = log2above(ibcq->cqe);
tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
- tmpl->txq.wqes =
- (volatile struct mlx5_wqe64 (*)[])
- (uintptr_t)qp->gen_data.sqstart;
+ tmpl->txq.wqes = qp->gen_data.sqstart;
tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
tmpl->txq.bf_reg = qp->gen_data.bf->reg;
@@ -258,14 +224,15 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
.socket = socket,
};
union {
- struct ibv_exp_query_intf_params params;
struct ibv_exp_qp_init_attr init;
- struct ibv_exp_res_domain_init_attr rd;
struct ibv_exp_cq_init_attr cq;
struct ibv_exp_qp_attr mod;
struct ibv_exp_cq_attr cq_attr;
} attr;
- enum ibv_exp_query_intf_status status;
+ unsigned int cqe_n;
+ const unsigned int max_tso_inline = ((MLX5_MAX_TSO_HEADER +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE);
int ret = 0;
if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
@@ -276,27 +243,18 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
(void)conf; /* Thresholds configuration (ignored). */
assert(desc > MLX5_TX_COMP_THRESH);
tmpl.txq.elts_n = log2above(desc);
+ if (priv->mps == MLX5_MPW_ENHANCED)
+ tmpl.txq.mpw_hdr_dseg = priv->mpw_hdr_dseg;
/* MRs will be registered in mp2mr[] later. */
- attr.rd = (struct ibv_exp_res_domain_init_attr){
- .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
- IBV_EXP_RES_DOMAIN_MSG_MODEL),
- .thread_model = IBV_EXP_THREAD_SINGLE,
- .msg_model = IBV_EXP_MSG_HIGH_BW,
- };
- tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
- if (tmpl.rd == NULL) {
- ret = ENOMEM;
- ERROR("%p: RD creation failure: %s",
- (void *)dev, strerror(ret));
- goto error;
- }
attr.cq = (struct ibv_exp_cq_init_attr){
- .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
- .res_domain = tmpl.rd,
+ .comp_mask = 0,
};
+ cqe_n = ((desc / MLX5_TX_COMP_THRESH) - 1) ?
+ ((desc / MLX5_TX_COMP_THRESH) - 1) : 1;
+ if (priv->mps == MLX5_MPW_ENHANCED)
+ cqe_n += MLX5_TX_COMP_THRESH_INLINE_DIV;
tmpl.cq = ibv_exp_create_cq(priv->ctx,
- (((desc / MLX5_TX_COMP_THRESH) - 1) ?
- ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
+ cqe_n,
NULL, NULL, 0, &attr.cq);
if (tmpl.cq == NULL) {
ret = ENOMEM;
@@ -332,17 +290,52 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
* TX burst. */
.sq_sig_all = 0,
.pd = priv->pd,
- .res_domain = tmpl.rd,
- .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
- IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
+ .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
};
if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
tmpl.txq.max_inline =
((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
RTE_CACHE_LINE_SIZE);
- attr.init.cap.max_inline_data =
- tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+ tmpl.txq.inline_en = 1;
+ /* TSO and MPS can't be enabled concurrently. */
+ assert(!priv->tso || !priv->mps);
+ if (priv->mps == MLX5_MPW_ENHANCED) {
+ tmpl.txq.inline_max_packet_sz =
+ priv->inline_max_packet_sz;
+ /* To minimize the size of the data set, avoid
+ * requesting too large a WQ.
+ */
+ attr.init.cap.max_inline_data =
+ ((RTE_MIN(priv->txq_inline,
+ priv->inline_max_packet_sz) +
+ (RTE_CACHE_LINE_SIZE - 1)) /
+ RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
+ } else if (priv->tso) {
+ int inline_diff = tmpl.txq.max_inline - max_tso_inline;
+
+ /*
+ * Adjust inline value as Verbs aggregates
+ * tso_inline and txq_inline fields.
+ */
+ attr.init.cap.max_inline_data = inline_diff > 0 ?
+ inline_diff *
+ RTE_CACHE_LINE_SIZE :
+ 0;
+ } else {
+ attr.init.cap.max_inline_data =
+ tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
+ }
}
+ if (priv->tso) {
+ attr.init.max_tso_header =
+ max_tso_inline * RTE_CACHE_LINE_SIZE;
+ attr.init.comp_mask |= IBV_EXP_QP_INIT_ATTR_MAX_TSO_HEADER;
+ tmpl.txq.max_inline = RTE_MAX(tmpl.txq.max_inline,
+ max_tso_inline);
+ tmpl.txq.tso_en = 1;
+ }
+ if (priv->tunnel_en)
+ tmpl.txq.tunnel_en = 1;
tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
if (tmpl.qp == NULL) {
ret = (errno ? errno : EINVAL);
@@ -391,36 +384,6 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
(void *)dev, strerror(ret));
goto error;
}
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_CQ,
- .obj = tmpl.cq,
- };
- tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_cq == NULL) {
- ret = EINVAL;
- ERROR("%p: CQ interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
- attr.params = (struct ibv_exp_query_intf_params){
- .intf_scope = IBV_EXP_INTF_GLOBAL,
- .intf = IBV_EXP_INTF_QP_BURST,
- .intf_version = 1,
- .obj = tmpl.qp,
- /* Enable multi-packet send if supported. */
- .family_flags =
- ((priv->mps && !priv->sriov) ?
- IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
- 0),
- };
- tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
- if (tmpl.if_qp == NULL) {
- ret = EINVAL;
- ERROR("%p: QP interface family query failed with status %d",
- (void *)dev, status);
- goto error;
- }
/* Clean up txq in case we're reinitializing it. */
DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
txq_cleanup(txq_ctrl);
@@ -496,6 +459,19 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
}
(*priv->txqs)[idx] = NULL;
txq_cleanup(txq_ctrl);
+ /* Resize if txq size is changed. */
+ if (txq_ctrl->txq.elts_n != log2above(desc)) {
+ txq_ctrl = rte_realloc(txq_ctrl,
+ sizeof(*txq_ctrl) +
+ desc * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq_ctrl) {
+ ERROR("%p: unable to reallocate queue index %u",
+ (void *)dev, idx);
+ priv_unlock(priv);
+ return -ENOMEM;
+ }
+ }
} else {
txq_ctrl =
rte_calloc_socket("TXQ", 1,
diff --git a/drivers/net/mpipe/Makefile b/drivers/net/mpipe/Makefile
deleted file mode 100644
index 846e2e07..00000000
--- a/drivers/net/mpipe/Makefile
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Copyright 2015 EZchip Semiconductor Ltd. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_mpipe.a
-
-CFLAGS += $(WERROR_FLAGS) -O3
-LDLIBS += -lgxio
-
-EXPORT_MAP := rte_pmd_mpipe_version.map
-
-LIBABIVER := 1
-
-SRCS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe_tilegx.c
-
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c
deleted file mode 100644
index fbbbb002..00000000
--- a/drivers/net/mpipe/mpipe_tilegx.c
+++ /dev/null
@@ -1,1655 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of EZchip Semiconductor nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <unistd.h>
-
-#include <rte_eal.h>
-#include <rte_vdev.h>
-#include <rte_eal_memconfig.h>
-#include <rte_ethdev.h>
-#include <rte_malloc.h>
-#include <rte_cycles.h>
-
-#include <arch/mpipe_xaui_def.h>
-#include <arch/mpipe_gbe_def.h>
-
-#include <gxio/mpipe.h>
-
-#ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
-#define PMD_DEBUG_RX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__)
-#define PMD_DEBUG_TX(...) RTE_LOG(DEBUG, PMD, __VA_ARGS__)
-#else
-#define PMD_DEBUG_RX(...)
-#define PMD_DEBUG_TX(...)
-#endif
-
-#define MPIPE_MAX_CHANNELS 128
-#define MPIPE_TX_MAX_QUEUES 128
-#define MPIPE_RX_MAX_QUEUES 16
-#define MPIPE_TX_DESCS 512
-#define MPIPE_RX_BUCKETS 256
-#define MPIPE_RX_STACK_SIZE 65536
-#define MPIPE_RX_IP_ALIGN 2
-#define MPIPE_BSM_ALIGN 128
-
-#define MPIPE_LINK_UPDATE_TIMEOUT 10 /* s */
-#define MPIPE_LINK_UPDATE_INTERVAL 100000 /* us */
-
-struct mpipe_channel_config {
- int enable;
- int first_bucket;
- int num_buckets;
- int head_room;
- gxio_mpipe_rules_stacks_t stacks;
-};
-
-struct mpipe_context {
- rte_spinlock_t lock;
- gxio_mpipe_context_t context;
- struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
-};
-
-/* Per-core local data. */
-struct mpipe_local {
- int mbuf_push_debt[RTE_MAX_ETHPORTS]; /* Buffer push debt. */
-} __rte_cache_aligned;
-
-#define MPIPE_BUF_DEBT_THRESHOLD 32
-static __thread struct mpipe_local mpipe_local;
-static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
-static int mpipe_instances;
-static const char *drivername = "MPIPE PMD";
-
-/* Per queue statistics. */
-struct mpipe_queue_stats {
- uint64_t packets, bytes, errors, nomem;
-};
-
-/* Common tx/rx queue fields. */
-struct mpipe_queue {
- struct mpipe_dev_priv *priv; /* "priv" data of its device. */
- uint16_t nb_desc; /* Number of tx descriptors. */
- uint16_t port_id; /* Device index. */
- uint16_t stat_idx; /* Queue stats index. */
- uint8_t queue_idx; /* Queue index. */
- uint8_t link_status; /* 0 = link down. */
- struct mpipe_queue_stats stats; /* Stat data for the queue. */
-};
-
-/* Transmit queue description. */
-struct mpipe_tx_queue {
- struct mpipe_queue q; /* Common stuff. */
-};
-
-/* Receive queue description. */
-struct mpipe_rx_queue {
- struct mpipe_queue q; /* Common stuff. */
- gxio_mpipe_iqueue_t iqueue; /* mPIPE iqueue. */
- gxio_mpipe_idesc_t *next_desc; /* Next idesc to process. */
- int avail_descs; /* Number of available descs. */
- void *rx_ring_mem; /* DMA ring memory. */
-};
-
-struct mpipe_dev_priv {
- gxio_mpipe_context_t *context; /* mPIPE context. */
- gxio_mpipe_link_t link; /* mPIPE link for the device. */
- gxio_mpipe_equeue_t equeue; /* mPIPE equeue. */
- unsigned equeue_size; /* mPIPE equeue desc count. */
- int instance; /* mPIPE instance. */
- int ering; /* mPIPE eDMA ring. */
- int stack; /* mPIPE buffer stack. */
- int channel; /* Device channel. */
- int port_id; /* DPDK port index. */
- struct rte_eth_dev *eth_dev; /* DPDK device. */
- struct rte_mbuf **tx_comps; /* TX completion array. */
- struct rte_mempool *rx_mpool; /* mpool used by the rx queues. */
- unsigned rx_offset; /* Receive head room. */
- unsigned rx_size_code; /* mPIPE rx buffer size code. */
- int is_xaui:1, /* Is this an xgbe or gbe? */
- initialized:1, /* Initialized port? */
- running:1; /* Running port? */
- struct ether_addr mac_addr; /* MAC address. */
- unsigned nb_rx_queues; /* Configured tx queues. */
- unsigned nb_tx_queues; /* Configured rx queues. */
- int first_bucket; /* mPIPE bucket start index. */
- int first_ring; /* mPIPE notif ring start index. */
- int notif_group; /* mPIPE notif group. */
- rte_atomic32_t dp_count __rte_cache_aligned; /* DP Entry count. */
- int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
- int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
-};
-
-#define mpipe_priv(dev) \
- ((struct mpipe_dev_priv*)(dev)->data->dev_private)
-
-#define mpipe_name(priv) \
- ((priv)->eth_dev->data->name)
-
-#define mpipe_rx_queue(priv, n) \
- ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])
-
-#define mpipe_tx_queue(priv, n) \
- ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])
-
-static void
-mpipe_xmit_flush(struct mpipe_dev_priv *priv);
-
-static void
-mpipe_recv_flush(struct mpipe_dev_priv *priv);
-
-static int mpipe_equeue_sizes[] = {
- [GXIO_MPIPE_EQUEUE_ENTRY_512] = 512,
- [GXIO_MPIPE_EQUEUE_ENTRY_2K] = 2048,
- [GXIO_MPIPE_EQUEUE_ENTRY_8K] = 8192,
- [GXIO_MPIPE_EQUEUE_ENTRY_64K] = 65536,
-};
-
-static int mpipe_iqueue_sizes[] = {
- [GXIO_MPIPE_IQUEUE_ENTRY_128] = 128,
- [GXIO_MPIPE_IQUEUE_ENTRY_512] = 512,
- [GXIO_MPIPE_IQUEUE_ENTRY_2K] = 2048,
- [GXIO_MPIPE_IQUEUE_ENTRY_64K] = 65536,
-};
-
-static int mpipe_buffer_sizes[] = {
- [GXIO_MPIPE_BUFFER_SIZE_128] = 128,
- [GXIO_MPIPE_BUFFER_SIZE_256] = 256,
- [GXIO_MPIPE_BUFFER_SIZE_512] = 512,
- [GXIO_MPIPE_BUFFER_SIZE_1024] = 1024,
- [GXIO_MPIPE_BUFFER_SIZE_1664] = 1664,
- [GXIO_MPIPE_BUFFER_SIZE_4096] = 4096,
- [GXIO_MPIPE_BUFFER_SIZE_10368] = 10368,
- [GXIO_MPIPE_BUFFER_SIZE_16384] = 16384,
-};
-
-static gxio_mpipe_context_t *
-mpipe_context(int instance)
-{
- if (instance < 0 || instance >= mpipe_instances)
- return NULL;
- return &mpipe_contexts[instance].context;
-}
-
-static int mpipe_channel_config(int instance, int channel,
- struct mpipe_channel_config *config)
-{
- struct mpipe_channel_config *data;
- struct mpipe_context *context;
- gxio_mpipe_rules_t rules;
- int idx, rc = 0;
-
- if (instance < 0 || instance >= mpipe_instances ||
- channel < 0 || channel >= MPIPE_MAX_CHANNELS)
- return -EINVAL;
-
- context = &mpipe_contexts[instance];
-
- rte_spinlock_lock(&context->lock);
-
- gxio_mpipe_rules_init(&rules, &context->context);
-
- for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
- data = (channel == idx) ? config : &context->channels[idx];
-
- if (!data->enable)
- continue;
-
- rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
- data->num_buckets, &data->stacks);
- if (rc < 0) {
- goto done;
- }
-
- rc = gxio_mpipe_rules_add_channel(&rules, idx);
- if (rc < 0) {
- goto done;
- }
-
- rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
- if (rc < 0) {
- goto done;
- }
- }
-
- rc = gxio_mpipe_rules_commit(&rules);
- if (rc == 0) {
- memcpy(&context->channels[channel], config, sizeof(*config));
- }
-
-done:
- rte_spinlock_unlock(&context->lock);
-
- return rc;
-}
-
-static int
-mpipe_get_size_index(int *array, int count, int size,
- bool roundup)
-{
- int i, last = -1;
-
- for (i = 0; i < count && array[i] < size; i++) {
- if (array[i])
- last = i;
- }
-
- if (roundup)
- return i < count ? (int)i : -ENOENT;
- else
- return last >= 0 ? last : -ENOENT;
-}
-
-static int
-mpipe_calc_size(int *array, int count, int size)
-{
- int index = mpipe_get_size_index(array, count, size, 1);
- return index < 0 ? index : array[index];
-}
-
-static int mpipe_equeue_size(int size)
-{
- int result;
- result = mpipe_calc_size(mpipe_equeue_sizes,
- RTE_DIM(mpipe_equeue_sizes), size);
- return result;
-}
-
-static int mpipe_iqueue_size(int size)
-{
- int result;
- result = mpipe_calc_size(mpipe_iqueue_sizes,
- RTE_DIM(mpipe_iqueue_sizes), size);
- return result;
-}
-
-static int mpipe_buffer_size_index(int size)
-{
- int result;
- result = mpipe_get_size_index(mpipe_buffer_sizes,
- RTE_DIM(mpipe_buffer_sizes), size, 0);
- return result;
-}
-
-static inline int
-mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = link;
- struct rte_eth_link *src = &(dev->data->dev_link);
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static inline int
-mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &(dev->data->dev_link);
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return -1;
-
- return 0;
-}
-
-static void
-mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
- struct rte_eth_dev_info *dev_info)
-{
- dev_info->min_rx_bufsize = 128;
- dev_info->max_rx_pktlen = 1518;
- dev_info->max_tx_queues = MPIPE_TX_MAX_QUEUES;
- dev_info->max_rx_queues = MPIPE_RX_MAX_QUEUES;
- dev_info->max_mac_addrs = 1;
- dev_info->rx_offload_capa = 0;
- dev_info->tx_offload_capa = 0;
-}
-
-static int
-mpipe_configure(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
-
- if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
- RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
- mpipe_name(priv), dev->data->nb_tx_queues,
- MPIPE_TX_MAX_QUEUES);
- return -EINVAL;
- }
- priv->nb_tx_queues = dev->data->nb_tx_queues;
-
- if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
- RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
- mpipe_name(priv), dev->data->nb_rx_queues,
- MPIPE_RX_MAX_QUEUES);
- }
- priv->nb_rx_queues = dev->data->nb_rx_queues;
-
- return 0;
-}
-
-static inline int
-mpipe_link_compare(struct rte_eth_link *link1,
- struct rte_eth_link *link2)
-{
- return (*(uint64_t *)link1 == *(uint64_t *)link2)
- ? -1 : 0;
-}
-
-static int
-mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct rte_eth_link old, new;
- int64_t state, speed;
- int count, rc;
-
- memset(&old, 0, sizeof(old));
- memset(&new, 0, sizeof(new));
- mpipe_dev_atomic_read_link_status(dev, &old);
-
- for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
- if (!priv->initialized)
- break;
-
- state = gxio_mpipe_link_get_attr(&priv->link,
- GXIO_MPIPE_LINK_CURRENT_STATE);
- if (state < 0)
- break;
-
- speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
-
- new.link_autoneg = (dev->data->dev_conf.link_speeds &
- ETH_LINK_SPEED_AUTONEG);
- if (speed == GXIO_MPIPE_LINK_1G) {
- new.link_speed = ETH_SPEED_NUM_1G;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
- new.link_status = ETH_LINK_UP;
- } else if (speed == GXIO_MPIPE_LINK_10G) {
- new.link_speed = ETH_SPEED_NUM_10G;
- new.link_duplex = ETH_LINK_FULL_DUPLEX;
- new.link_status = ETH_LINK_UP;
- }
-
- rc = mpipe_link_compare(&old, &new);
- if (rc == 0 || !wait_to_complete)
- break;
-
- rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
- }
-
- mpipe_dev_atomic_write_link_status(dev, &new);
- return rc;
-}
-
-static int
-mpipe_set_link(struct rte_eth_dev *dev, int up)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- int rc;
-
- rc = gxio_mpipe_link_set_attr(&priv->link,
- GXIO_MPIPE_LINK_DESIRED_STATE,
- up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
- mpipe_name(priv), up ? "up" : "down");
- } else {
- mpipe_link_update(dev, 0);
- }
-
- return rc;
-}
-
-static int
-mpipe_set_link_up(struct rte_eth_dev *dev)
-{
- return mpipe_set_link(dev, 1);
-}
-
-static int
-mpipe_set_link_down(struct rte_eth_dev *dev)
-{
- return mpipe_set_link(dev, 0);
-}
-
-static inline void
-mpipe_dp_enter(struct mpipe_dev_priv *priv)
-{
- __insn_mtspr(SPR_DSTREAM_PF, 0);
- rte_atomic32_inc(&priv->dp_count);
-}
-
-static inline void
-mpipe_dp_exit(struct mpipe_dev_priv *priv)
-{
- rte_atomic32_dec(&priv->dp_count);
-}
-
-static inline void
-mpipe_dp_wait(struct mpipe_dev_priv *priv)
-{
- while (rte_atomic32_read(&priv->dp_count) != 0) {
- rte_pause();
- }
-}
-
-static inline int
-mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
-{
- return (mbuf->port < RTE_MAX_ETHPORTS) ?
- mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
- priv->stack;
-}
-
-static inline struct rte_mbuf *
-mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
- int in_port)
-{
- void *va = gxio_mpipe_idesc_get_va(idesc);
- uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
- struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);
-
- rte_pktmbuf_reset(mbuf);
- mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
- mbuf->port = in_port;
- mbuf->data_len = size;
- mbuf->pkt_len = size;
- mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);
-
- PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
- mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);
-
- return mbuf;
-}
-
-static inline void
-mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
-{
- const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
- void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);
-
- gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
- PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
- mpipe_name(priv), mbuf, buf_addr, priv->stack);
-}
-
-static inline void
-mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
-{
- struct rte_mbuf *mbuf;
- int i;
-
- for (i = 0; i < count; i++) {
- mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
- if (!mbuf)
- break;
- mpipe_recv_push(priv, mbuf);
- }
-
- PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
-}
-
-static inline void
-mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
-{
- const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
- uint8_t in_port = priv->port_id;
- struct rte_mbuf *mbuf;
- void *va;
-
- while (1) {
- va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
- if (!va)
- break;
- mbuf = RTE_PTR_SUB(va, offset);
-
- PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
- mpipe_name(priv), mbuf, va);
-
- mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
- mbuf->refcnt = 1;
- mbuf->nb_segs = 1;
- mbuf->port = in_port;
- mbuf->packet_type = 0;
- mbuf->data_len = 0;
- mbuf->pkt_len = 0;
-
- __rte_mbuf_raw_free(mbuf);
- }
-}
-
-static void
-mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
-{
- size_t size = ms->hugepage_sz;
- uint8_t *addr, *end;
- int rc;
-
- for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
- rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
- size, 0);
- if (rc < 0)
- break;
- }
-
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
- mpipe_name(priv), ms->addr, rc);
- } else {
- RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
- mpipe_name(priv), ms->addr,
- RTE_PTR_ADD(ms->addr, ms->len - 1));
- }
-}
-
-static int
-mpipe_recv_init(struct mpipe_dev_priv *priv)
-{
- const struct rte_memseg *seg = rte_eal_get_physmem_layout();
- size_t stack_size;
- void *stack_mem;
- int rc;
-
- if (!priv->rx_mpool) {
- RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
- mpipe_name(priv));
- return -ENODEV;
- }
-
- /* Allocate one NotifRing for each queue. */
- rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
- 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->first_ring = rc;
-
- /* Allocate a NotifGroup. */
- rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->notif_group = rc;
-
- /* Allocate required buckets. */
- rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->first_bucket = rc;
-
- rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->stack = rc;
-
- while (seg && seg->addr)
- mpipe_register_segment(priv, seg++);
-
- stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
- stack_mem = rte_zmalloc(NULL, stack_size, 65536);
- if (!stack_mem) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
- mpipe_name(priv));
- return -ENOMEM;
- } else {
- RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
- mpipe_name(priv), stack_mem,
- RTE_PTR_ADD(stack_mem, stack_size - 1));
- }
-
- rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
- priv->rx_size_code, stack_mem,
- stack_size, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
- mpipe_name(priv));
- return rc;
- }
-
- return 0;
-}
-
-static int
-mpipe_xmit_init(struct mpipe_dev_priv *priv)
-{
- size_t ring_size;
- void *ring_mem;
- int rc;
-
- /* Allocate eDMA ring. */
- rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
- mpipe_name(priv));
- return rc;
- }
- priv->ering = rc;
-
- rc = mpipe_equeue_size(MPIPE_TX_DESCS);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
- mpipe_name(priv), (int)MPIPE_TX_DESCS);
- return -ENOMEM;
- }
- priv->equeue_size = rc;
-
- /* Initialize completion array. */
- ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
- priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
- if (!priv->tx_comps) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
- mpipe_name(priv));
- return -ENOMEM;
- }
-
- /* Allocate eDMA ring memory. */
- ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
- ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
- if (!ring_mem) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
- mpipe_name(priv));
- return -ENOMEM;
- } else {
- RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
- mpipe_name(priv), ring_mem,
- RTE_PTR_ADD(ring_mem, ring_size - 1));
- }
-
- /* Initialize eDMA ring. */
- rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
- priv->channel, ring_mem, ring_size, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
- mpipe_name(priv));
- return rc;
- }
-
- return 0;
-}
-
-static int
-mpipe_link_init(struct mpipe_dev_priv *priv)
-{
- int rc;
-
- /* Open the link. */
- rc = gxio_mpipe_link_open(&priv->link, priv->context,
- mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
- mpipe_name(priv));
- return rc;
- }
-
- /* Get the channel index. */
- rc = gxio_mpipe_link_channel(&priv->link);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Bad channel\n",
- mpipe_name(priv));
- return rc;
- }
- priv->channel = rc;
-
- return 0;
-}
-
-static int
-mpipe_init(struct mpipe_dev_priv *priv)
-{
- int rc;
-
- if (priv->initialized)
- return 0;
-
- rc = mpipe_recv_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
- mpipe_name(priv));
- return rc;
- }
-
- rc = mpipe_xmit_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
- mpipe_name(priv));
- rte_free(priv);
- return rc;
- }
-
- priv->initialized = 1;
-
- return 0;
-}
-
-static int
-mpipe_start(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_channel_config config;
- struct mpipe_rx_queue *rx_queue;
- struct rte_eth_link eth_link;
- unsigned queue, buffers = 0;
- size_t ring_size;
- void *ring_mem;
- int rc;
-
- memset(&eth_link, 0, sizeof(eth_link));
- mpipe_dev_atomic_write_link_status(dev, &eth_link);
-
- rc = mpipe_init(priv);
- if (rc < 0)
- return rc;
-
- /* Initialize NotifRings. */
- for (queue = 0; queue < priv->nb_rx_queues; queue++) {
- rx_queue = mpipe_rx_queue(priv, queue);
- ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);
-
- ring_mem = rte_malloc(NULL, ring_size, ring_size);
- if (!ring_mem) {
- RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
- mpipe_name(priv));
- return -ENOMEM;
- } else {
- RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
- mpipe_name(priv), queue, ring_mem,
- RTE_PTR_ADD(ring_mem, ring_size - 1));
- }
-
- rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
- priv->first_ring + queue, ring_mem,
- ring_size, 0);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
- mpipe_name(priv));
- return rc;
- }
-
- rx_queue->rx_ring_mem = ring_mem;
- buffers += rx_queue->q.nb_desc;
- }
-
- /* Initialize ingress NotifGroup and buckets. */
- rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
- priv->notif_group, priv->first_ring, priv->nb_rx_queues,
- priv->first_bucket, MPIPE_RX_BUCKETS,
- GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
- mpipe_name(priv));
- return rc;
- }
-
- /* Configure the classifier to deliver packets from this port. */
- config.enable = 1;
- config.first_bucket = priv->first_bucket;
- config.num_buckets = MPIPE_RX_BUCKETS;
- memset(&config.stacks, 0xff, sizeof(config.stacks));
- config.stacks.stacks[priv->rx_size_code] = priv->stack;
- config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;
-
- rc = mpipe_channel_config(priv->instance, priv->channel,
- &config);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
- mpipe_name(priv));
- return rc;
- }
-
- /* Fill empty buffers into the buffer stack. */
- mpipe_recv_fill_stack(priv, buffers);
-
- /* Bring up the link. */
- mpipe_set_link_up(dev);
-
- /* Start xmit/recv on queues. */
- for (queue = 0; queue < priv->nb_tx_queues; queue++)
- mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
- for (queue = 0; queue < priv->nb_rx_queues; queue++)
- mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
- priv->running = 1;
-
- return 0;
-}
-
-static void
-mpipe_stop(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_channel_config config;
- unsigned queue;
- int rc;
-
- for (queue = 0; queue < priv->nb_tx_queues; queue++)
- mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
- for (queue = 0; queue < priv->nb_rx_queues; queue++)
- mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
-
- /* Make sure the link_status writes land. */
- rte_wmb();
-
- /*
- * Wait for link_status change to register with straggling datapath
- * threads.
- */
- mpipe_dp_wait(priv);
-
- /* Bring down the link. */
- mpipe_set_link_down(dev);
-
- /* Remove classifier rules. */
- memset(&config, 0, sizeof(config));
- rc = mpipe_channel_config(priv->instance, priv->channel,
- &config);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
- mpipe_name(priv));
- }
-
- /* Flush completed xmit packets. */
- mpipe_xmit_flush(priv);
-
- /* Flush buffer stacks. */
- mpipe_recv_flush(priv);
-
- priv->running = 0;
-}
-
-static void
-mpipe_close(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- if (priv->running)
- mpipe_stop(dev);
-}
-
-static void
-mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_tx_queue *tx_queue;
- struct mpipe_rx_queue *rx_queue;
- unsigned i;
- uint16_t idx;
-
- memset(stats, 0, sizeof(*stats));
-
- for (i = 0; i < priv->nb_tx_queues; i++) {
- tx_queue = mpipe_tx_queue(priv, i);
-
- stats->opackets += tx_queue->q.stats.packets;
- stats->obytes += tx_queue->q.stats.bytes;
- stats->oerrors += tx_queue->q.stats.errors;
-
- idx = tx_queue->q.stat_idx;
- if (idx != (uint16_t)-1) {
- stats->q_opackets[idx] += tx_queue->q.stats.packets;
- stats->q_obytes[idx] += tx_queue->q.stats.bytes;
- stats->q_errors[idx] += tx_queue->q.stats.errors;
- }
- }
-
- for (i = 0; i < priv->nb_rx_queues; i++) {
- rx_queue = mpipe_rx_queue(priv, i);
-
- stats->ipackets += rx_queue->q.stats.packets;
- stats->ibytes += rx_queue->q.stats.bytes;
- stats->ierrors += rx_queue->q.stats.errors;
- stats->rx_nombuf += rx_queue->q.stats.nomem;
-
- idx = rx_queue->q.stat_idx;
- if (idx != (uint16_t)-1) {
- stats->q_ipackets[idx] += rx_queue->q.stats.packets;
- stats->q_ibytes[idx] += rx_queue->q.stats.bytes;
- stats->q_errors[idx] += rx_queue->q.stats.errors;
- }
- }
-}
-
-static void
-mpipe_stats_reset(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- struct mpipe_tx_queue *tx_queue;
- struct mpipe_rx_queue *rx_queue;
- unsigned i;
-
- for (i = 0; i < priv->nb_tx_queues; i++) {
- tx_queue = mpipe_tx_queue(priv, i);
- memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
- }
-
- for (i = 0; i < priv->nb_rx_queues; i++) {
- rx_queue = mpipe_rx_queue(priv, i);
- memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
- }
-}
-
-static int
-mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
- uint8_t stat_idx, uint8_t is_rx)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
-
- if (is_rx) {
- priv->rx_stat_mapping[stat_idx] = queue_id;
- } else {
- priv->tx_stat_mapping[stat_idx] = queue_id;
- }
-
- return 0;
-}
-
-static int
-mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t nb_desc, unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
-{
- struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- uint16_t idx;
-
- tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
- RTE_CACHE_LINE_SIZE);
- if (!tx_queue) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
- mpipe_name(priv));
- return -ENOMEM;
- }
-
- memset(&tx_queue->q, 0, sizeof(tx_queue->q));
- tx_queue->q.priv = priv;
- tx_queue->q.queue_idx = queue_idx;
- tx_queue->q.port_id = dev->data->port_id;
- tx_queue->q.nb_desc = nb_desc;
-
- tx_queue->q.stat_idx = -1;
- for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
- if (priv->tx_stat_mapping[idx] == queue_idx)
- tx_queue->q.stat_idx = idx;
- }
-
- dev->data->tx_queues[queue_idx] = tx_queue;
-
- return 0;
-}
-
-static void
-mpipe_tx_queue_release(void *_txq)
-{
- rte_free(_txq);
-}
-
-static int
-mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
- uint16_t nb_desc, unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
- struct rte_mempool *mp)
-{
- struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- uint16_t idx;
- int size, rc;
-
- rc = mpipe_iqueue_size(nb_desc);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
- mpipe_name(priv), (int)nb_desc);
- return -ENOMEM;
- }
-
- if (rc != nb_desc) {
- RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
- mpipe_name(priv), (int)nb_desc, rc);
- nb_desc = rc;
- }
-
- size = sizeof(*rx_queue);
- rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
- if (!rx_queue) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
- mpipe_name(priv));
- return -ENOMEM;
- }
-
- memset(&rx_queue->q, 0, sizeof(rx_queue->q));
- rx_queue->q.priv = priv;
- rx_queue->q.nb_desc = nb_desc;
- rx_queue->q.port_id = dev->data->port_id;
- rx_queue->q.queue_idx = queue_idx;
-
- if (!priv->rx_mpool) {
- int size = (rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM -
- MPIPE_RX_IP_ALIGN);
-
- priv->rx_offset = (sizeof(struct rte_mbuf) +
- rte_pktmbuf_priv_size(mp) +
- RTE_PKTMBUF_HEADROOM +
- MPIPE_RX_IP_ALIGN);
- if (size < 0) {
- RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
- mpipe_name(priv),
- rte_pktmbuf_data_room_size(mp));
- return -ENOMEM;
- }
-
- priv->rx_size_code = mpipe_buffer_size_index(size);
- priv->rx_mpool = mp;
- }
-
- if (priv->rx_mpool != mp) {
- RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
- mpipe_name(priv));
- }
-
- rx_queue->q.stat_idx = -1;
- for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
- if (priv->rx_stat_mapping[idx] == queue_idx)
- rx_queue->q.stat_idx = idx;
- }
-
- dev->data->rx_queues[queue_idx] = rx_queue;
-
- return 0;
-}
-
-static void
-mpipe_rx_queue_release(void *_rxq)
-{
- rte_free(_rxq);
-}
-
-#define MPIPE_XGBE_ENA_HASH_MULTI \
- (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
-#define MPIPE_XGBE_ENA_HASH_UNI \
- (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
-#define MPIPE_XGBE_COPY_ALL \
- (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
-#define MPIPE_GBE_ENA_MULTI_HASH \
- (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
-#define MPIPE_GBE_ENA_UNI_HASH \
- (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
-#define MPIPE_GBE_COPY_ALL \
- (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)
-
-static void
-mpipe_promiscuous_enable(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- int64_t reg;
- int addr;
-
- if (priv->is_xaui) {
- addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
- reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
- reg |= MPIPE_XGBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- } else {
- addr = MPIPE_GBE_NETWORK_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
- reg &= ~MPIPE_GBE_ENA_UNI_HASH;
- reg |= MPIPE_GBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- }
-}
-
-static void
-mpipe_promiscuous_disable(struct rte_eth_dev *dev)
-{
- struct mpipe_dev_priv *priv = mpipe_priv(dev);
- int64_t reg;
- int addr;
-
- if (priv->is_xaui) {
- addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg |= MPIPE_XGBE_ENA_HASH_MULTI;
- reg |= MPIPE_XGBE_ENA_HASH_UNI;
- reg &= ~MPIPE_XGBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- } else {
- addr = MPIPE_GBE_NETWORK_CONFIGURATION;
- reg = gxio_mpipe_link_mac_rd(&priv->link, addr);
- reg |= MPIPE_GBE_ENA_MULTI_HASH;
- reg |= MPIPE_GBE_ENA_UNI_HASH;
- reg &= ~MPIPE_GBE_COPY_ALL;
- gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
- }
-}
-
-static const struct eth_dev_ops mpipe_dev_ops = {
- .dev_infos_get = mpipe_infos_get,
- .dev_configure = mpipe_configure,
- .dev_start = mpipe_start,
- .dev_stop = mpipe_stop,
- .dev_close = mpipe_close,
- .stats_get = mpipe_stats_get,
- .stats_reset = mpipe_stats_reset,
- .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
- .tx_queue_setup = mpipe_tx_queue_setup,
- .rx_queue_setup = mpipe_rx_queue_setup,
- .tx_queue_release = mpipe_tx_queue_release,
- .rx_queue_release = mpipe_rx_queue_release,
- .link_update = mpipe_link_update,
- .dev_set_link_up = mpipe_set_link_up,
- .dev_set_link_down = mpipe_set_link_down,
- .promiscuous_enable = mpipe_promiscuous_enable,
- .promiscuous_disable = mpipe_promiscuous_disable,
-};
-
-static inline void
-mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
-{
- gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
- gxio_mpipe_equeue_t *equeue = &priv->equeue;
- int64_t slot;
-
- for (slot = start; slot < end; slot++) {
- gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
- }
-}
-
-static void
-mpipe_xmit_flush(struct mpipe_dev_priv *priv)
-{
- gxio_mpipe_equeue_t *equeue = &priv->equeue;
- int64_t slot;
-
- /* Post a dummy descriptor and wait for its return. */
- slot = gxio_mpipe_equeue_reserve(equeue, 1);
- if (slot < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
- mpipe_name(priv));
- return;
- }
-
- mpipe_xmit_null(priv, slot, slot + 1);
-
- while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
- rte_pause();
- }
-
- for (slot = 0; slot < priv->equeue_size; slot++) {
- if (priv->tx_comps[slot])
- rte_pktmbuf_free_seg(priv->tx_comps[slot]);
- }
-}
-
-static void
-mpipe_recv_flush(struct mpipe_dev_priv *priv)
-{
- uint8_t in_port = priv->port_id;
- struct mpipe_rx_queue *rx_queue;
- gxio_mpipe_iqueue_t *iqueue;
- gxio_mpipe_idesc_t idesc;
- struct rte_mbuf *mbuf;
- unsigned queue;
-
- /* Release packets on the buffer stack. */
- mpipe_recv_flush_stack(priv);
-
- /* Flush packets sitting in recv queues. */
- for (queue = 0; queue < priv->nb_rx_queues; queue++) {
- rx_queue = mpipe_rx_queue(priv, queue);
- iqueue = &rx_queue->iqueue;
- while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
- /* Skip idesc with the 'buffer error' bit set. */
- if (idesc.be)
- continue;
- mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
- rte_pktmbuf_free(mbuf);
- }
- rte_free(rx_queue->rx_ring_mem);
- }
-}
-
-static inline uint16_t
-mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
-{
- struct mpipe_dev_priv *priv = tx_queue->q.priv;
- gxio_mpipe_equeue_t *equeue = &priv->equeue;
- unsigned nb_bytes = 0;
- unsigned nb_sent = 0;
- int nb_slots, i;
- uint8_t port_id;
-
- PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
- nb_pkts, mpipe_name(tx_queue->q.priv),
- tx_queue->q.queue_idx);
-
- /* Optimistic assumption that we need exactly one slot per packet. */
- nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);
-
- do {
- struct rte_mbuf *mbuf = NULL, *pkt = NULL;
- int64_t slot;
-
- /* Reserve eDMA ring slots. */
- slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
- if (unlikely(slot < 0)) {
- break;
- }
-
- for (i = 0; i < nb_slots; i++) {
- unsigned idx = (slot + i) & (priv->equeue_size - 1);
- rte_prefetch0(priv->tx_comps[idx]);
- }
-
- /* Fill up slots with descriptor and completion info. */
- for (i = 0; i < nb_slots; i++) {
- unsigned idx = (slot + i) & (priv->equeue_size - 1);
- gxio_mpipe_edesc_t desc;
- struct rte_mbuf *next;
-
- /* Starting on a new packet? */
- if (likely(!mbuf)) {
- int room = nb_slots - i;
-
- pkt = mbuf = tx_pkts[nb_sent];
-
- /* Bail out if we run out of descs. */
- if (unlikely(pkt->nb_segs > room))
- break;
-
- nb_sent++;
- }
-
- /* We have a segment to send. */
- next = mbuf->next;
-
- if (priv->tx_comps[idx])
- rte_pktmbuf_free_seg(priv->tx_comps[idx]);
-
- port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
- mbuf->port : priv->port_id;
- desc = (gxio_mpipe_edesc_t) { {
- .va = rte_pktmbuf_mtod(mbuf, uintptr_t),
- .xfer_size = rte_pktmbuf_data_len(mbuf),
- .bound = next ? 0 : 1,
- .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
- .size = priv->rx_size_code,
- } };
- if (mpipe_local.mbuf_push_debt[port_id] > 0) {
- mpipe_local.mbuf_push_debt[port_id]--;
- desc.hwb = 1;
- priv->tx_comps[idx] = NULL;
- } else
- priv->tx_comps[idx] = mbuf;
-
- nb_bytes += mbuf->data_len;
- gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
-
- PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
- mpipe_name(priv),
- tx_queue->q.queue_idx,
- rte_pktmbuf_mtod(mbuf, void *),
- rte_pktmbuf_data_len(mbuf));
-
- mbuf = next;
- }
-
- if (unlikely(nb_sent < nb_pkts)) {
-
- /* Fill remaining slots with null descriptors. */
- mpipe_xmit_null(priv, slot + i, slot + nb_slots);
-
- /*
- * Calculate exact number of descriptors needed for
- * the next go around.
- */
- nb_slots = 0;
- for (i = nb_sent; i < nb_pkts; i++) {
- nb_slots += tx_pkts[i]->nb_segs;
- }
-
- nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
- }
- } while (nb_sent < nb_pkts);
-
- tx_queue->q.stats.packets += nb_sent;
- tx_queue->q.stats.bytes += nb_bytes;
-
- return nb_sent;
-}
-
-static inline uint16_t
-mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
-{
- struct mpipe_dev_priv *priv = rx_queue->q.priv;
- gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
- gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
- uint8_t in_port = rx_queue->q.port_id;
- const unsigned look_ahead = 8;
- int room = nb_pkts, rc = 0;
- unsigned nb_packets = 0;
- unsigned nb_dropped = 0;
- unsigned nb_nomem = 0;
- unsigned nb_bytes = 0;
- unsigned nb_descs, i;
-
- while (room && !rc) {
- if (rx_queue->avail_descs < room) {
- rc = gxio_mpipe_iqueue_try_peek(iqueue,
- &rx_queue->next_desc);
- rx_queue->avail_descs = rc < 0 ? 0 : rc;
- }
-
- if (unlikely(!rx_queue->avail_descs)) {
- break;
- }
-
- nb_descs = RTE_MIN(room, rx_queue->avail_descs);
-
- first_idesc = rx_queue->next_desc;
- last_idesc = first_idesc + nb_descs;
-
- rx_queue->next_desc += nb_descs;
- rx_queue->avail_descs -= nb_descs;
-
- for (i = 1; i < look_ahead; i++) {
- rte_prefetch0(first_idesc + i);
- }
-
- PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
- mpipe_name(rx_queue->q.priv),
- rx_queue->q.queue_idx,
- nb_descs);
-
- for (idesc = first_idesc; idesc < last_idesc; idesc++) {
- struct rte_mbuf *mbuf;
-
- PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
- mpipe_name(priv),
- rx_queue->q.queue_idx,
- nb_packets, nb_descs);
-
- rte_prefetch0(idesc + look_ahead);
-
- PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
- "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
- mpipe_name(priv),
- rx_queue->q.queue_idx,
- idesc,
- idesc->me ? "me, " : "",
- idesc->tr ? "tr, " : "",
- idesc->ce ? "ce, " : "",
- idesc->ct ? "ct, " : "",
- idesc->cs ? "cs, " : "",
- idesc->nr ? "nr, " : "",
- idesc->sq ? "sq, " : "",
- idesc->ts ? "ts, " : "",
- idesc->ps ? "ps, " : "",
- idesc->be ? "be, " : "",
- idesc->l2_size,
- idesc->bucket_id,
- idesc->channel,
- idesc->notif_ring,
- (unsigned long)idesc->packet_sqn,
- (unsigned long)idesc->va);
-
- if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
- nb_dropped++;
- gxio_mpipe_iqueue_drop(iqueue, idesc);
- PMD_DEBUG_RX("%s:%d: Descriptor error\n",
- mpipe_name(rx_queue->q.priv),
- rx_queue->q.queue_idx);
- continue;
- }
-
- if (mpipe_local.mbuf_push_debt[in_port] <
- MPIPE_BUF_DEBT_THRESHOLD)
- mpipe_local.mbuf_push_debt[in_port]++;
- else {
- mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
- if (unlikely(!mbuf)) {
- nb_nomem++;
- gxio_mpipe_iqueue_drop(iqueue, idesc);
- PMD_DEBUG_RX("%s:%d: alloc failure\n",
- mpipe_name(rx_queue->q.priv),
- rx_queue->q.queue_idx);
- continue;
- }
-
- mpipe_recv_push(priv, mbuf);
- }
-
- /* Get and setup the mbuf for the received packet. */
- mbuf = mpipe_recv_mbuf(priv, idesc, in_port);
-
- /* Update results and statistics counters. */
- rx_pkts[nb_packets] = mbuf;
- nb_bytes += mbuf->pkt_len;
- nb_packets++;
- }
-
- /*
- * We release the ring in bursts, but do not track and release
- * buckets. This therefore breaks dynamic flow affinity, but
- * we always operate in static affinity mode, and so we're OK
- * with this optimization.
- */
- gxio_mpipe_iqueue_advance(iqueue, nb_descs);
- gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);
-
- /*
- * Go around once more if we haven't yet peeked the queue, and
- * if we have more room to receive.
- */
- room = nb_pkts - nb_packets;
- }
-
- rx_queue->q.stats.packets += nb_packets;
- rx_queue->q.stats.bytes += nb_bytes;
- rx_queue->q.stats.errors += nb_dropped;
- rx_queue->q.stats.nomem += nb_nomem;
-
- PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
- mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
- nb_packets, nb_bytes, nb_dropped, nb_nomem);
-
- return nb_packets;
-}
-
-static uint16_t
-mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
-{
- struct mpipe_rx_queue *rx_queue = _rxq;
- uint16_t result = 0;
-
- if (rx_queue) {
- mpipe_dp_enter(rx_queue->q.priv);
- if (likely(rx_queue->q.link_status))
- result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
- mpipe_dp_exit(rx_queue->q.priv);
- }
-
- return result;
-}
-
-static uint16_t
-mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
- struct mpipe_tx_queue *tx_queue = _txq;
- uint16_t result = 0;
-
- if (tx_queue) {
- mpipe_dp_enter(tx_queue->q.priv);
- if (likely(tx_queue->q.link_status))
- result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
- mpipe_dp_exit(tx_queue->q.priv);
- }
-
- return result;
-}
-
-static int
-mpipe_link_mac(const char *ifname, uint8_t *mac)
-{
- int rc, idx;
- char name[GXIO_MPIPE_LINK_NAME_LEN];
-
- for (idx = 0, rc = 0; !rc; idx++) {
- rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
- if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
- return 0;
- }
- return -ENODEV;
-}
-
-static int
-rte_pmd_mpipe_probe(const char *ifname,
- const char *params __rte_unused)
-{
- gxio_mpipe_context_t *context;
- struct rte_eth_dev *eth_dev;
- struct mpipe_dev_priv *priv;
- int instance, rc;
- uint8_t *mac;
-
- /* Get the mPIPE instance that the device belongs to. */
- instance = gxio_mpipe_link_instance(ifname);
- context = mpipe_context(instance);
- if (!context) {
- RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
- return -ENODEV;
- }
-
- priv = rte_zmalloc(NULL, sizeof(*priv), 0);
- if (!priv) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
- return -ENOMEM;
- }
-
- memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
- memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
- priv->context = context;
- priv->instance = instance;
- priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
- priv->channel = -1;
-
- mac = priv->mac_addr.addr_bytes;
- rc = mpipe_link_mac(ifname, mac);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
- rte_free(priv);
- return -ENODEV;
- }
-
- eth_dev = rte_eth_dev_allocate(ifname);
- if (!eth_dev) {
- RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
- rte_free(priv);
- return -ENOMEM;
- }
-
- RTE_LOG(INFO, PMD, "%s: Initialized mpipe device"
- "(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
- ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
-
- priv->eth_dev = eth_dev;
- priv->port_id = eth_dev->data->port_id;
- eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = &priv->mac_addr;
-
- eth_dev->data->dev_flags = 0;
- eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->driver = NULL;
- eth_dev->data->drv_name = drivername;
- eth_dev->data->numa_node = instance;
-
- eth_dev->dev_ops = &mpipe_dev_ops;
- eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
- eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;
-
- rc = mpipe_link_init(priv);
- if (rc < 0) {
- RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
- mpipe_name(priv));
- return rc;
- }
-
- return 0;
-}
-
-static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
- .probe = rte_pmd_mpipe_probe,
-};
-
-static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
- .probe = rte_pmd_mpipe_probe,
-};
-
-RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
-RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
-RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
-RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);
-
-static void __attribute__((constructor, used))
-mpipe_init_contexts(void)
-{
- struct mpipe_context *context;
- int rc, instance;
-
- for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
- context = &mpipe_contexts[instance];
-
- rte_spinlock_init(&context->lock);
- rc = gxio_mpipe_init(&context->context, instance);
- if (rc < 0)
- break;
- }
-
- mpipe_instances = instance;
-}
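Note: the removed mpipe_rx_queue_setup() above derives both the hardware buffer size and the mbuf offset from the mempool geometry. A minimal standalone sketch of that arithmetic follows; the names are illustrative and the MPIPE_RX_IP_ALIGN value of 2 is an assumption, since its definition is not shown in this hunk.
/* Illustrative only: mirrors the sizing math in mpipe_rx_queue_setup(). */
#include <rte_mbuf.h>
#define EXAMPLE_RX_IP_ALIGN 2	/* assumed stand-in for MPIPE_RX_IP_ALIGN */
static int
example_rx_buffer_geometry(struct rte_mempool *mp, int *size, int *offset)
{
	/* Bytes the hardware may write into each buffer. */
	*size = rte_pktmbuf_data_room_size(mp) -
		RTE_PKTMBUF_HEADROOM - EXAMPLE_RX_IP_ALIGN;
	/* Distance from the start of the mbuf to the hardware buffer. */
	*offset = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp) +
		  RTE_PKTMBUF_HEADROOM + EXAMPLE_RX_IP_ALIGN;
	return *size < 0 ? -1 : 0;
}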
diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile
index 4cadd131..4ee2c2dc 100644
--- a/drivers/net/nfp/Makefile
+++ b/drivers/net/nfp/Makefile
@@ -50,9 +50,4 @@ LIBABIVER := 1
#
SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 099d82b3..5c5cba19 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -46,6 +46,7 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
@@ -63,8 +64,7 @@
/* Prototypes */
static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
-static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle,
- void *param);
+static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void nfp_net_infos_get(struct rte_eth_dev *dev,
@@ -205,26 +205,6 @@ nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
-/* Creating memzone for hardware rings. */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name,
- ring_name, dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0,
- NFP_MEMZONE_ALIGN);
-}
-
/*
* Atomically reads link status information from global structure rte_eth_dev.
*
@@ -308,7 +288,6 @@ static void
nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
{
nfp_net_rx_queue_release_mbufs(rxq);
- rxq->wr_p = 0;
rxq->rd_p = 0;
rxq->nb_rx_hold = 0;
}
@@ -347,8 +326,6 @@ nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
nfp_net_tx_queue_release_mbufs(txq);
txq->wr_p = 0;
txq->rd_p = 0;
- txq->tail = 0;
- txq->qcp_rd_p = 0;
}
static int
@@ -377,12 +354,12 @@ __nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
if (new == 0)
break;
if (new & NFP_NET_CFG_UPDATE_ERR) {
- PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
+ PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
return -1;
}
if (cnt >= NFP_NET_POLL_TIMEOUT) {
PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
- " %dms\n", update, cnt);
+ " %dms", update, cnt);
rte_panic("Exiting\n");
}
nanosleep(&wait, 0); /* waiting for 1ms */
@@ -426,7 +403,7 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
* Reconfig errors imply situations where they can be handled.
* Otherwise, rte_panic is called inside __nfp_net_reconfig
*/
- PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
+ PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
ctrl, update);
return -EIO;
}
@@ -456,7 +433,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
* called after that internal process
*/
- PMD_INIT_LOG(DEBUG, "Configure\n");
+ PMD_INIT_LOG(DEBUG, "Configure");
dev_conf = &dev->data->dev_conf;
rxmode = &dev_conf->rxmode;
@@ -464,7 +441,7 @@ nfp_net_configure(struct rte_eth_dev *dev)
/* Checking TX mode */
if (txmode->mq_mode) {
- PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
+ PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
return -EINVAL;
}
@@ -474,13 +451,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
update = NFP_NET_CFG_UPDATE_RSS;
new_ctrl = NFP_NET_CFG_CTRL_RSS;
} else {
- PMD_INIT_LOG(INFO, "RSS not supported\n");
+ PMD_INIT_LOG(INFO, "RSS not supported");
return -EINVAL;
}
}
if (rxmode->split_hdr_size) {
- PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
+ PMD_INIT_LOG(INFO, "rxmode does not support split header");
return -EINVAL;
}
@@ -488,13 +465,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
} else {
- PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
+ PMD_INIT_LOG(INFO, "RXCSUM not supported");
return -EINVAL;
}
}
if (rxmode->hw_vlan_filter) {
- PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
+ PMD_INIT_LOG(INFO, "VLAN filter not supported");
return -EINVAL;
}
@@ -502,13 +479,13 @@ nfp_net_configure(struct rte_eth_dev *dev)
if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
} else {
- PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
+ PMD_INIT_LOG(INFO, "hw vlan strip not supported");
return -EINVAL;
}
}
if (rxmode->hw_vlan_extend) {
- PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
+ PMD_INIT_LOG(INFO, "VLAN extended not supported");
return -EINVAL;
}
@@ -520,12 +497,12 @@ nfp_net_configure(struct rte_eth_dev *dev)
/* this is handled in rte_eth_dev_configure */
if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported\n");
+ PMD_INIT_LOG(INFO, "strip CRC not supported");
return -EINVAL;
}
if (rxmode->enable_scatter) {
- PMD_INIT_LOG(INFO, "Scatter not supported\n");
+ PMD_INIT_LOG(INFO, "Scatter not supported");
return -EINVAL;
}
@@ -629,15 +606,57 @@ static void nfp_net_read_mac(struct nfp_net_hw *hw)
}
static int
+nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
+ struct rte_intr_handle *intr_handle)
+{
+ struct nfp_net_hw *hw;
+ int i;
+
+ if (!intr_handle->intr_vec) {
+ intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ dev->data->nb_rx_queues * sizeof(int), 0);
+ if (!intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
+ " intr_vec", dev->data->nb_rx_queues);
+ return -ENOMEM;
+ }
+ }
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
+ /* UIO only supports one queue and no LSC */
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
+ } else {
+ PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ /*
+ * The first msix vector is reserved for non
+ * efd interrupts
+ */
+ nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
+ }
+
+ /* Avoiding TX interrupts */
+ hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
+ return 0;
+}
+
+static int
nfp_net_start(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
uint32_t new_ctrl, update = 0;
struct nfp_net_hw *hw;
+ uint32_t intr_vector;
int ret;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- PMD_INIT_LOG(DEBUG, "Start\n");
+ PMD_INIT_LOG(DEBUG, "Start");
/* Disabling queues just in case... */
nfp_net_disable_queues(dev);
@@ -648,10 +667,40 @@ nfp_net_start(struct rte_eth_dev *dev)
/* Enabling the required queues in the device */
nfp_net_enable_queues(dev);
+ /* check and configure queue intr-vector mapping */
+ if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
+ /*
+ * Better not to share LSC with RX interrupts.
+ * Unregistering LSC interrupt handler
+ */
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ nfp_net_dev_interrupt_handler, (void *)dev);
+
+ if (dev->data->nb_rx_queues > 1) {
+ PMD_INIT_LOG(ERR, "PMD rx interrupt only "
+ "supports 1 queue with UIO");
+ return -EIO;
+ }
+ }
+ intr_vector = dev->data->nb_rx_queues;
+ if (rte_intr_efd_enable(intr_handle, intr_vector))
+ return -1;
+ }
+
+ if (rte_intr_dp_is_en(intr_handle))
+ nfp_configure_rx_interrupt(dev, intr_handle);
+
+ rte_intr_enable(intr_handle);
+
/* Enable device */
- new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_UPDATE_MSIX;
+ new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE;
update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;
+ /* Just configuring queues interrupts when necessary */
+ if (rte_intr_dp_is_en(intr_handle))
+ update |= NFP_NET_CFG_UPDATE_MSIX;
+
if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
@@ -697,7 +746,7 @@ nfp_net_stop(struct rte_eth_dev *dev)
{
int i;
- PMD_INIT_LOG(DEBUG, "Stop\n");
+ PMD_INIT_LOG(DEBUG, "Stop");
nfp_net_disable_queues(dev);
@@ -718,10 +767,12 @@ static void
nfp_net_close(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
- PMD_INIT_LOG(DEBUG, "Close\n");
+ PMD_INIT_LOG(DEBUG, "Close");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
/*
* We assume that the DPDK application is stopping all the
@@ -730,11 +781,11 @@ nfp_net_close(struct rte_eth_dev *dev)
nfp_net_stop(dev);
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
/* unregister callback func from eal lib */
- rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
nfp_net_dev_interrupt_handler,
(void *)dev);
@@ -755,7 +806,7 @@ nfp_net_promisc_enable(struct rte_eth_dev *dev)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
- PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
+ PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
return;
}
@@ -816,6 +867,17 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
struct rte_eth_link link, old;
uint32_t nn_link_status;
+ static const uint32_t ls_to_ethtool[] = {
+ [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN] = ETH_SPEED_NUM_NONE,
+ [NFP_NET_CFG_STS_LINK_RATE_1G] = ETH_SPEED_NUM_1G,
+ [NFP_NET_CFG_STS_LINK_RATE_10G] = ETH_SPEED_NUM_10G,
+ [NFP_NET_CFG_STS_LINK_RATE_25G] = ETH_SPEED_NUM_25G,
+ [NFP_NET_CFG_STS_LINK_RATE_40G] = ETH_SPEED_NUM_40G,
+ [NFP_NET_CFG_STS_LINK_RATE_50G] = ETH_SPEED_NUM_50G,
+ [NFP_NET_CFG_STS_LINK_RATE_100G] = ETH_SPEED_NUM_100G,
+ };
+
PMD_DRV_LOG(DEBUG, "Link update\n");
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -831,8 +893,21 @@ nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
link.link_status = ETH_LINK_UP;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- /* Other cards can limit the tx and rx rate per VF */
- link.link_speed = ETH_SPEED_NUM_40G;
+
+ nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
+ NFP_NET_CFG_STS_LINK_RATE_MASK;
+
+ if ((NFD_CFG_MAJOR_VERSION_of(hw->ver) < 4) ||
+ ((NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4) &&
+ (NFD_CFG_MINOR_VERSION_of(hw->ver) == 0)))
+ /* We really do not know the speed with old firmware */
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ else {
+ if (nn_link_status >= RTE_DIM(ls_to_ethtool))
+ link.link_speed = ETH_SPEED_NUM_NONE;
+ else
+ link.link_speed = ls_to_ethtool[nn_link_status];
+ }
if (old.link_status != link.link_status) {
nfp_net_dev_atomic_write_link_status(dev, &link);
@@ -1006,7 +1081,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1055,7 +1130,12 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
- dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
+
+ if (hw->cap & NFP_NET_CFG_CTRL_LSO)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
}
static const uint32_t *
@@ -1085,13 +1165,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];
- if (rxq == NULL) {
- PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
- return 0;
- }
-
- idx = rxq->rd_p % rxq->rx_count;
- rxds = &rxq->rxds[idx];
+ idx = rxq->rd_p;
count = 0;
@@ -1119,9 +1193,49 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
return count;
}
+static int
+nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
+ NFP_NET_CFG_ICR_UNMASKED);
+ return 0;
+}
+
+static int
+nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev;
+ struct nfp_net_hw *hw;
+ int base = 0;
+
+ hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
+ base = 1;
+
+ /* Make sure all updates are written before un-masking */
+ rte_wmb();
+ nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
+ return 0;
+}
+
static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
struct rte_eth_link link;
memset(&link, 0, sizeof(link));
@@ -1136,8 +1250,8 @@ nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
(int)(dev->data->port_id));
RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
- dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
- dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
+ pci_dev->addr.domain, pci_dev->addr.bus,
+ pci_dev->addr.devid, pci_dev->addr.function);
}
/* Interrupt configuration and handling */
@@ -1152,13 +1266,15 @@ static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
struct nfp_net_hw *hw;
+ struct rte_pci_device *pci_dev;
hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
/* If MSI-X auto-masking is used, clear the entry */
rte_wmb();
- rte_intr_enable(&dev->pci_dev->intr_handle);
+ rte_intr_enable(&pci_dev->intr_handle);
} else {
/* Make sure all updates are written before un-masking */
rte_wmb();
@@ -1168,8 +1284,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
}
static void
-nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+nfp_net_dev_interrupt_handler(void *param)
{
int64_t timeout;
struct rte_eth_link link;
@@ -1321,9 +1436,10 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
sizeof(struct nfp_net_rx_desc) *
- NFP_NET_MAX_RX_DESC, socket_id);
+ NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
if (tz == NULL) {
RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
@@ -1390,8 +1506,6 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
rxe[i].mbuf = mbuf;
PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
-
- rxq->wr_p++;
}
/* Make sure all writes are flushed before telling the hardware */
@@ -1465,9 +1579,10 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
sizeof(struct nfp_net_tx_desc) *
- NFP_NET_MAX_TX_DESC, socket_id);
+ NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
+ socket_id);
if (tz == NULL) {
RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
nfp_net_tx_queue_release(txq);
@@ -1475,7 +1590,6 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
}
txq->tx_count = nb_desc;
- txq->tail = 0;
txq->tx_free_thresh = tx_free_thresh;
txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
@@ -1519,6 +1633,33 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
return 0;
}
+/* nfp_net_tx_tso - Set TX descriptor for TSO */
+static inline void
+nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
+ struct rte_mbuf *mb)
+{
+ uint64_t ol_flags;
+ struct nfp_net_hw *hw = txq->hw;
+
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LSO))
+ goto clean_txd;
+
+ ol_flags = mb->ol_flags;
+
+ if (!(ol_flags & PKT_TX_TCP_SEG))
+ goto clean_txd;
+
+ txd->l4_offset = mb->l2_len + mb->l3_len + mb->l4_len;
+ txd->lso = rte_cpu_to_le_16(mb->tso_segsz);
+ txd->flags = PCIE_DESC_TX_LSO;
+ return;
+
+clean_txd:
+ txd->flags = 0;
+ txd->l4_offset = 0;
+ txd->lso = 0;
+}
+
/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
@@ -1604,12 +1745,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
- /*
- * hash type is sharing the same word with input port info
- * 31-8: input port
- * 7:0: hash type
- */
- hash_type &= 0xff;
mbuf->hash.rss = hash;
mbuf->ol_flags |= PKT_RX_RSS_HASH;
@@ -1628,29 +1763,6 @@ nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
}
}
-/* nfp_net_check_port - Set mbuf in_port field */
-static void
-nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf)
-{
- uint32_t port;
-
- if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) {
- mbuf->port = 0;
- return;
- }
-
- port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr +
- mbuf->data_off - 8));
-
- /*
- * hash type is sharing the same word with input port info
- * 31-8: input port
- * 7:0: hash type
- */
- port = (uint8_t)(port >> 8);
- mbuf->port = port;
-}
-
static inline void
nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
{
@@ -1696,7 +1808,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
struct nfp_net_hw *hw;
struct rte_mbuf *mb;
struct rte_mbuf *new_mb;
- int idx;
uint16_t nb_hold;
uint64_t dma_addr;
int avail;
@@ -1707,7 +1818,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* DPDK just checks the queue is lower than max queues
* enabled. But the queue needs to be configured
*/
- RTE_LOG(ERR, PMD, "RX Bad queue\n");
+ RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
return -EINVAL;
}
@@ -1716,11 +1827,9 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
nb_hold = 0;
while (avail < nb_pkts) {
- idx = rxq->rd_p % rxq->rx_count;
-
- rxb = &rxq->rxbufs[idx];
+ rxb = &rxq->rxbufs[rxq->rd_p];
if (unlikely(rxb == NULL)) {
- RTE_LOG(ERR, PMD, "rxb does not exist!\n");
+ RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
break;
}
@@ -1730,7 +1839,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
rte_rmb();
- rxds = &rxq->rxds[idx];
+ rxds = &rxq->rxds[rxq->rd_p];
if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
break;
@@ -1740,7 +1849,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
if (unlikely(new_mb == NULL)) {
- RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
+ RTE_LOG_DP(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
"queue_id=%u\n", (unsigned)rxq->port_id,
(unsigned)rxq->qidx);
nfp_net_mbuf_alloc_failed(rxq);
@@ -1771,7 +1880,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
* responsibility of avoiding it. But we have
* to give some info about the error
*/
- RTE_LOG(ERR, PMD,
+ RTE_LOG_DP(ERR, PMD,
"mbuf overflow likely due to the RX offset.\n"
"\t\tYour mbuf size should have extra space for"
" RX offset=%u bytes.\n"
@@ -1800,9 +1909,6 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
/* Checking the checksum flag */
nfp_net_rx_cksum(rxq, rxds, mb);
- /* Checking the port flag */
- nfp_net_check_port(rxds, mb);
-
if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
@@ -1821,6 +1927,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
rxq->rd_p++;
+ if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping? */
+ rxq->rd_p = 0;
}
if (nb_hold == 0)
@@ -1866,33 +1974,40 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
/* Work out how many packets have been sent */
qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);
- if (qcp_rd_p == txq->qcp_rd_p) {
+ if (qcp_rd_p == txq->rd_p) {
PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
"packets (%u, %u)\n", txq->qidx,
- qcp_rd_p, txq->qcp_rd_p);
+ qcp_rd_p, txq->rd_p);
return 0;
}
- if (qcp_rd_p > txq->qcp_rd_p)
- todo = qcp_rd_p - txq->qcp_rd_p;
+ if (qcp_rd_p > txq->rd_p)
+ todo = qcp_rd_p - txq->rd_p;
else
- todo = qcp_rd_p + txq->tx_count - txq->qcp_rd_p;
+ todo = qcp_rd_p + txq->tx_count - txq->rd_p;
- PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->qcp_rd_p: %u, qcp->rd_p: %u\n",
- qcp_rd_p, txq->qcp_rd_p, txq->rd_p);
+ PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u\n",
+ qcp_rd_p, txq->rd_p, txq->rd_p);
if (todo == 0)
return todo;
- txq->qcp_rd_p += todo;
- txq->qcp_rd_p %= txq->tx_count;
txq->rd_p += todo;
+ if (unlikely(txq->rd_p >= txq->tx_count))
+ txq->rd_p -= txq->tx_count;
return todo;
}
/* Always leave some free descriptors to avoid wrapping confusion */
-#define NFP_FREE_TX_DESC(t) (t->tx_count - (t->wr_p - t->rd_p) - 8)
+static inline
+uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
+{
+ if (txq->wr_p >= txq->rd_p)
+ return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
+ else
+ return txq->rd_p - txq->wr_p - 8;
+}
/*
* nfp_net_txq_full - Check if the TX queue free descriptors
@@ -1903,9 +2018,9 @@ nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
* This function uses the host copy* of read/write pointers
*/
static inline
-int nfp_net_txq_full(struct nfp_net_txq *txq)
+uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
{
- return NFP_FREE_TX_DESC(txq) < txq->tx_free_thresh;
+ return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
}
static uint16_t
@@ -1913,7 +2028,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct nfp_net_txq *txq;
struct nfp_net_hw *hw;
- struct nfp_net_tx_desc *txds;
+ struct nfp_net_tx_desc *txds, txd;
struct rte_mbuf *pkt;
uint64_t dma_addr;
int pkt_size, dma_size;
@@ -1923,15 +2038,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq = tx_queue;
hw = txq->hw;
- txds = &txq->txds[txq->tail];
+ txds = &txq->txds[txq->wr_p];
PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
- txq->qidx, txq->tail, nb_pkts);
+ txq->qidx, txq->wr_p, nb_pkts);
- if ((NFP_FREE_TX_DESC(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
+ if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
nfp_net_tx_free_bufs(txq);
- free_descs = (uint16_t)NFP_FREE_TX_DESC(txq);
+ free_descs = (uint16_t)nfp_free_tx_desc(txq);
if (unlikely(free_descs == 0))
return 0;
@@ -1944,7 +2059,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Sending packets */
while ((i < nb_pkts) && free_descs) {
/* Grabbing the mbuf linked to the current descriptor */
- lmbuf = &txq->txbufs[txq->tail].mbuf;
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
/* Warming the cache for releasing the mbuf later on */
RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
@@ -1952,7 +2067,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
if (unlikely((pkt->nb_segs > 1) &&
!(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
- PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
+ PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
rte_panic("Multisegment packet unsupported\n");
}
@@ -1962,19 +2077,18 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/*
* Checksum and VLAN flags just in the first descriptor for a
- * multisegment packet
+ * multisegment packet, but TSO info needs to be in all of them.
*/
- nfp_net_tx_cksum(txq, txds, pkt);
+ txd.data_len = pkt->pkt_len;
+ nfp_net_tx_tso(txq, &txd, pkt);
+ nfp_net_tx_cksum(txq, &txd, pkt);
if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
(hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
- txds->flags |= PCIE_DESC_TX_VLAN;
- txds->vlan = pkt->vlan_tci;
+ txd.flags |= PCIE_DESC_TX_VLAN;
+ txd.vlan = pkt->vlan_tci;
}
- if (pkt->ol_flags & PKT_TX_TCP_SEG)
- rte_panic("TSO is not supported\n");
-
/*
* mbuf data_len is the data in one segment and pkt_len data
* in the whole packet. When the packet is just one segment,
@@ -1982,16 +2096,20 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
*/
pkt_size = pkt->pkt_len;
- /* Releasing mbuf which was prefetched above */
- if (*lmbuf)
- rte_pktmbuf_free(*lmbuf);
- /*
- * Linking mbuf with descriptor for being released
- * next time descriptor is used
- */
- *lmbuf = pkt;
-
while (pkt_size) {
+ /* Copying TSO, VLAN and cksum info */
+ *txds = txd;
+
+ /* Releasing mbuf used by this descriptor previously*/
+ if (*lmbuf)
+ rte_pktmbuf_free_seg(*lmbuf);
+
+ /*
+ * Linking mbuf with descriptor for being released
+ * next time descriptor is used
+ */
+ *lmbuf = pkt;
+
dma_size = pkt->data_len;
dma_addr = rte_mbuf_data_dma_addr(pkt);
PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -1999,16 +2117,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
/* Filling descriptors fields */
txds->dma_len = dma_size;
- txds->data_len = pkt->pkt_len;
+ txds->data_len = txd.data_len;
txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
txds->dma_addr_lo = (dma_addr & 0xffffffff);
ASSERT(free_descs > 0);
free_descs--;
txq->wr_p++;
- txq->tail++;
- if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
- txq->tail = 0;
+ if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping? */
+ txq->wr_p = 0;
pkt_size -= dma_size;
if (!pkt_size) {
@@ -2019,7 +2136,8 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
pkt = pkt->next;
}
/* Referencing next free TX descriptor */
- txds = &txq->txds[txq->tail];
+ txds = &txq->txds[txq->wr_p];
+ lmbuf = &txq->txbufs[txq->wr_p].mbuf;
issued_descs++;
}
i++;
@@ -2306,6 +2424,8 @@ static const struct eth_dev_ops nfp_net_eth_dev_ops = {
.rx_queue_count = nfp_net_rx_queue_count,
.tx_queue_setup = nfp_net_tx_queue_setup,
.tx_queue_release = nfp_net_tx_queue_release,
+ .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
+ .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
};
static int
@@ -2330,15 +2450,16 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
hw->device_id = pci_dev->id.device_id;
hw->vendor_id = pci_dev->id.vendor_id;
hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
- PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
+ PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
pci_dev->id.vendor_id, pci_dev->id.device_id,
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function);
@@ -2365,13 +2486,13 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return -ENODEV;
}
- PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
- PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);
+ PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x", tx_bar_off);
+ PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x", rx_bar_off);
hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;
- PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
+ PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
nfp_net_cfg_queue_setup(hw);
@@ -2387,9 +2508,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
else
hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
- PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
+ PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d",
hw->ver, hw->max_mtu);
- PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
+ PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s", hw->cap,
hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
@@ -2400,13 +2521,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "");
- pci_dev = eth_dev->pci_dev;
hw->ctrl = 0;
hw->stride_rx = stride;
hw->stride_tx = stride;
- PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
+ PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
hw->max_rx_queues, hw->max_tx_queues);
/* Initializing spinlock for reconfigs */
@@ -2441,9 +2561,6 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
nfp_net_dev_interrupt_handler,
(void *)eth_dev);
- /* enable uio intr after callback register */
- rte_intr_enable(&pci_dev->intr_handle);
-
/* Telling the firmware about the LSC interrupt entry */
nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
@@ -2453,7 +2570,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct rte_pci_id pci_id_nfp_net_map[] = {
+static const struct rte_pci_id pci_id_nfp_net_map[] = {
{
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
PCI_DEVICE_ID_NFP6000_PF_NIC)
@@ -2467,20 +2584,28 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
},
};
-static struct eth_driver rte_nfp_net_pmd = {
- .pci_drv = {
- .id_table = pci_id_nfp_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
- RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = nfp_net_init,
- .dev_private_size = sizeof(struct nfp_net_adapter),
+static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct nfp_net_adapter), nfp_net_init);
+}
+
+static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nfp_net_pmd = {
+ .id_table = pci_id_nfp_net_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = eth_nfp_pci_probe,
+ .remove = eth_nfp_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_nfp, "* igb_uio | uio_pci_generic | vfio");
/*
* Local variables:
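Note: the TX accounting rework above drops the NFP_FREE_TX_DESC macro in favour of nfp_free_tx_desc(), which keeps wr_p/rd_p inside [0, tx_count) and always holds back 8 slots so a full ring cannot be mistaken for an empty one. A minimal sketch of that free-slot computation, with illustrative names:
#include <stdint.h>
/* Illustrative ring-accounting helper; 8 slots are reserved, as in
 * nfp_free_tx_desc(), so full and empty states stay distinguishable. */
static inline uint32_t
example_ring_free_slots(uint32_t wr_p, uint32_t rd_p, uint32_t ring_size)
{
	if (wr_p >= rd_p)
		return ring_size - (wr_p - rd_p) - 8;
	return rd_p - wr_p - 8;
}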
diff --git a/drivers/net/nfp/nfp_net_ctrl.h b/drivers/net/nfp/nfp_net_ctrl.h
index fce82515..2c500433 100644
--- a/drivers/net/nfp/nfp_net_ctrl.h
+++ b/drivers/net/nfp/nfp_net_ctrl.h
@@ -112,6 +112,7 @@
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* Enable VXLAN */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* Enable NVGRE */
+#define NFP_NET_CFG_CTRL_MSIX_TX_OFF (0x1 << 26) /* Disable MSIX for TX */
#define NFP_NET_CFG_UPDATE 0x0004
#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
@@ -157,6 +158,17 @@
#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
#define NFP_NET_CFG_STS 0x0034
#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
+/* Link rate */
+#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
+#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF
+#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0
+#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
+#define NFP_NET_CFG_STS_LINK_RATE_1G 2
+#define NFP_NET_CFG_STS_LINK_RATE_10G 3
+#define NFP_NET_CFG_STS_LINK_RATE_25G 4
+#define NFP_NET_CFG_STS_LINK_RATE_40G 5
+#define NFP_NET_CFG_STS_LINK_RATE_50G 6
+#define NFP_NET_CFG_STS_LINK_RATE_100G 7
#define NFP_NET_CFG_CAP 0x0038
#define NFP_NET_CFG_MAX_TXRINGS 0x003c
#define NFP_NET_CFG_MAX_RXRINGS 0x0040
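Note: the link-rate field added above is consumed in nfp_net_link_update() by shifting and masking the status word and indexing the ls_to_ethtool table, falling back to ETH_SPEED_NUM_NONE for codes the table does not cover. A small sketch of that decode, with illustrative names:
#include <stdint.h>
/* Illustrative decode of the link-rate bits defined above. */
static uint32_t
example_link_speed(uint32_t sts, const uint32_t *speed_table, uint32_t entries)
{
	uint32_t rate = (sts >> 1) & 0xF;	/* RATE_SHIFT / RATE_MASK above */
	if (rate >= entries)
		return 0;			/* ETH_SPEED_NUM_NONE */
	return speed_table[rate];
}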
diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h
index c1809720..eec56bc1 100644
--- a/drivers/net/nfp/nfp_net_pmd.h
+++ b/drivers/net/nfp/nfp_net_pmd.h
@@ -121,25 +121,26 @@ struct nfp_net_adapter;
#define NFD_CFG_MINOR_VERSION_of(x) (((x) >> 0) & 0xff)
#include <linux/types.h>
+#include <rte_io.h>
static inline uint8_t nn_readb(volatile const void *addr)
{
- return *((volatile const uint8_t *)(addr));
+ return rte_read8(addr);
}
static inline void nn_writeb(uint8_t val, volatile void *addr)
{
- *((volatile uint8_t *)(addr)) = val;
+ rte_write8(val, addr);
}
static inline uint32_t nn_readl(volatile const void *addr)
{
- return *((volatile const uint32_t *)(addr));
+ return rte_read32(addr);
}
static inline void nn_writel(uint32_t val, volatile void *addr)
{
- *((volatile uint32_t *)(addr)) = val;
+ rte_write32(val, addr);
}
static inline uint64_t nn_readq(volatile void *addr)
@@ -216,12 +217,10 @@ struct nfp_net_txq {
uint32_t wr_p;
uint32_t rd_p;
- uint32_t qcp_rd_p;
uint32_t tx_count;
uint32_t tx_free_thresh;
- uint32_t tail;
/*
* For each descriptor keep a reference to the mbuff and
@@ -240,7 +239,7 @@ struct nfp_net_txq {
struct nfp_net_tx_desc *txds;
/*
- * At this point 56 bytes have been used for all the fields in the
+ * At this point 48 bytes have been used for all the fields in the
* TX critical path. We have room for 8 bytes and still all placed
* in a cache line. We are not using the threshold values below nor
* the txq_flags but if we need to, we can add the most used in the
@@ -269,7 +268,7 @@ struct nfp_net_txq {
#define PCIE_DESC_RX_I_TCP_CSUM_OK (1 << 11)
#define PCIE_DESC_RX_I_UDP_CSUM (1 << 10)
#define PCIE_DESC_RX_I_UDP_CSUM_OK (1 << 9)
-#define PCIE_DESC_RX_INGRESS_PORT (1 << 8)
+#define PCIE_DESC_RX_SPARE (1 << 8)
#define PCIE_DESC_RX_EOP (1 << 7)
#define PCIE_DESC_RX_IP4_CSUM (1 << 6)
#define PCIE_DESC_RX_IP4_CSUM_OK (1 << 5)
@@ -326,7 +325,6 @@ struct nfp_net_rxq {
* freelist descriptors and @rd_p is where the driver start
* reading descriptors for newly arrive packets from.
*/
- uint32_t wr_p;
uint32_t rd_p;
/*
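Note: the register accessors above now route through rte_io.h instead of open-coded volatile dereferences. A minimal sketch of a wrapper in that style (the helper name is illustrative, not part of the driver):
#include <stdint.h>
#include <rte_io.h>
/* Illustrative wrapper: write a 32-bit config word at an offset into a BAR. */
static inline void
example_bar_write32(volatile void *bar, uint32_t off, uint32_t val)
{
	rte_write32(val, (volatile uint8_t *)bar + off);
}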
diff --git a/drivers/net/null/Makefile b/drivers/net/null/Makefile
index 0c909c6f..77810bce 100644
--- a/drivers/net/null/Makefile
+++ b/drivers/net/null/Makefile
@@ -41,23 +41,11 @@ CFLAGS += $(WERROR_FLAGS)
EXPORT_MAP := rte_pmd_null_version.map
-LIBABIVER := 1
+LIBABIVER := 2
#
# all source are stored in SRCS-y
#
SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c
-#
-# Export include files
-#
-SYMLINK-y-include += rte_eth_null.h
-
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c
index 836d982a..abf3ec75 100644
--- a/drivers/net/null/rte_eth_null.c
+++ b/drivers/net/null/rte_eth_null.c
@@ -33,14 +33,13 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>
-#include "rte_eth_null.h"
-
#define ETH_NULL_PACKET_SIZE_ARG "size"
#define ETH_NULL_PACKET_COPY_ARG "copy"
@@ -50,6 +49,7 @@ static unsigned default_packet_copy;
static const char *valid_arguments[] = {
ETH_NULL_PACKET_SIZE_ARG,
ETH_NULL_PACKET_COPY_ARG,
+ "driver",
NULL
};
@@ -88,7 +88,6 @@ struct pmd_internals {
static struct ether_addr eth_addr = { .addr_bytes = {0} };
-static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -113,8 +112,6 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
break;
bufs[i]->data_len = (uint16_t)packet_size;
bufs[i]->pkt_len = packet_size;
- bufs[i]->nb_segs = 1;
- bufs[i]->next = NULL;
bufs[i]->port = h->internals->port_id;
}
@@ -295,13 +292,11 @@ eth_dev_info(struct rte_eth_dev *dev,
return;
internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
dev_info->reta_size = internals->reta_size;
dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}
@@ -480,9 +475,10 @@ static const struct eth_dev_ops ops = {
.rss_hash_conf_get = eth_rss_hash_conf_get
};
-int
-eth_dev_null_create(const char *name,
- const unsigned numa_node,
+static struct rte_vdev_driver pmd_null_drv;
+
+static int
+eth_dev_null_create(struct rte_vdev_device *dev,
unsigned packet_size,
unsigned packet_copy)
{
@@ -499,27 +495,25 @@ eth_dev_null_create(const char *name,
0xBE, 0xAC, 0x01, 0xFA
};
- if (name == NULL)
- return -EINVAL;
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
- numa_node);
+ dev->device.numa_node);
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
*/
- data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
- if (data == NULL)
- goto error;
-
- internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
- if (internals == NULL)
- goto error;
+ data = rte_zmalloc_socket(rte_vdev_device_name(dev), sizeof(*data), 0,
+ dev->device.numa_node);
+ if (!data)
+ return -ENOMEM;
- /* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
- if (eth_dev == NULL)
- goto error;
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internals));
+ if (!eth_dev) {
+ rte_free(data);
+ return -ENOMEM;
+ }
/* now put it all together
* - store queue data in internals,
@@ -530,6 +524,7 @@ eth_dev_null_create(const char *name,
/* NOTE: we'll replace the data element, of originally allocated eth_dev
* so the nulls are local per-process */
+ internals = eth_dev->data->dev_private;
internals->packet_size = packet_size;
internals->packet_copy = packet_copy;
internals->port_id = eth_dev->data->port_id;
@@ -539,24 +534,16 @@ eth_dev_null_create(const char *name,
rte_memcpy(internals->rss_key, default_rss_key, 40);
- data->dev_private = internals;
- data->port_id = eth_dev->data->port_id;
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
data->mac_addrs = &eth_addr;
- strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));
eth_dev->data = data;
eth_dev->dev_ops = &ops;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
-
- eth_dev->driver = NULL;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- data->kdrv = RTE_KDRV_NONE;
- data->drv_name = drivername;
- data->numa_node = numa_node;
/* finally assign rx and tx ops */
if (packet_copy) {
@@ -568,12 +555,6 @@ eth_dev_null_create(const char *name,
}
return 0;
-
-error:
- rte_free(data);
- rte_free(internals);
-
- return -1;
}
static inline int
@@ -611,21 +592,21 @@ get_packet_copy_arg(const char *key __rte_unused,
}
static int
-rte_pmd_null_probe(const char *name, const char *params)
+rte_pmd_null_probe(struct rte_vdev_device *dev)
{
- unsigned numa_node;
+ const char *name, *params;
unsigned packet_size = default_packet_size;
unsigned packet_copy = default_packet_copy;
struct rte_kvargs *kvlist = NULL;
int ret;
- if (name == NULL)
+ if (!dev)
return -EINVAL;
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);
- numa_node = rte_socket_id();
-
if (params != NULL) {
kvlist = rte_kvargs_parse(params, valid_arguments);
if (kvlist == NULL)
@@ -654,7 +635,7 @@ rte_pmd_null_probe(const char *name, const char *params)
"packet copy is %s\n", packet_size,
packet_copy ? "enabled" : "disabled");
- ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);
+ ret = eth_dev_null_create(dev, packet_size, packet_copy);
free_kvlist:
if (kvlist)
@@ -663,18 +644,18 @@ free_kvlist:
}
static int
-rte_pmd_null_remove(const char *name)
+rte_pmd_null_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
- if (name == NULL)
+ if (!dev)
return -EINVAL;
RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
rte_socket_id());
/* find the ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
return -1;
diff --git a/drivers/net/null/rte_pmd_null_version.map b/drivers/net/null/rte_pmd_null_version.map
index 84b1d0fe..ef353984 100644
--- a/drivers/net/null/rte_pmd_null_version.map
+++ b/drivers/net/null/rte_pmd_null_version.map
@@ -2,10 +2,3 @@ DPDK_2.0 {
local: *;
};
-
-DPDK_2.2 {
- global:
-
- eth_dev_null_create;
-
-} DPDK_2.0;
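
Both the null and pcap conversions in this patch follow the same generic vdev probe/remove shape: the driver receives a struct rte_vdev_device and derives the device name and argument string from it instead of taking them as separate parameters. A minimal sketch of that pattern, with illustrative names that are not part of this patch:

#include <errno.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_vdev.h>

static int
example_vdev_probe(struct rte_vdev_device *dev)
{
	const char *name, *args;

	if (!dev)
		return -EINVAL;

	name = rte_vdev_device_name(dev);	/* e.g. "net_example0" */
	args = rte_vdev_device_args(dev);	/* the key=value string */
	RTE_LOG(INFO, PMD, "Probing %s (args: %s)\n", name,
		args ? args : "");

	/* parse args with rte_kvargs_parse() and create the ethdev here */
	return 0;
}

static int
example_vdev_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev;

	if (!dev)
		return -EINVAL;

	/* the port is looked up by its vdev name before being released */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return -1;

	/* free private data and release the port here */
	return 0;
}

static struct rte_vdev_driver example_drv = {
	.probe = example_vdev_probe,
	.remove = example_vdev_remove,
};

RTE_PMD_REGISTER_VDEV(net_example, example_drv);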
diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile
index 89ac4024..7ebd0bef 100644
--- a/drivers/net/pcap/Makefile
+++ b/drivers/net/pcap/Makefile
@@ -55,11 +55,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c
#
SYMLINK-y-include +=
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c
index 57b0b315..defb3b41 100644
--- a/drivers/net/pcap/rte_eth_pcap.c
+++ b/drivers/net/pcap/rte_eth_pcap.c
@@ -40,6 +40,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
@@ -119,7 +120,6 @@ static struct ether_addr eth_addr = {
.addr_bytes = { 0, 0, 0, 0x1, 0x2, 0x3 }
};
-static const char *drivername = "Pcap PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -294,9 +294,9 @@ eth_pcap_tx_dumper(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
}
}
- rte_pktmbuf_free(mbuf);
num_tx++;
tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
}
/*
@@ -552,14 +552,12 @@ eth_dev_info(struct rte_eth_dev *dev,
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->if_index = internals->if_index;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t) -1;
dev_info->max_rx_queues = dev->data->nb_rx_queues;
dev_info->max_tx_queues = dev->data->nb_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -790,15 +788,20 @@ open_tx_iface(const char *key, const char *value, void *extra_args)
return 0;
}
+static struct rte_vdev_driver pmd_pcap_drv;
+
static int
-pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
+pmd_init_internals(struct rte_vdev_device *vdev,
+ const unsigned int nb_rx_queues,
const unsigned int nb_tx_queues,
struct pmd_internals **internals,
struct rte_eth_dev **eth_dev)
{
struct rte_eth_dev_data *data = NULL;
- unsigned int numa_node = rte_socket_id();
+ unsigned int numa_node = vdev->device.numa_node;
+ const char *name;
+ name = rte_vdev_device_name(vdev);
RTE_LOG(INFO, PMD, "Creating pcap-backed ethdev on numa socket %u\n",
numa_node);
@@ -807,17 +810,14 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
- goto error;
-
- *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
- numa_node);
- if (*internals == NULL)
- goto error;
+ return -1;
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate(name);
- if (*eth_dev == NULL)
- goto error;
+ *eth_dev = rte_eth_vdev_allocate(vdev, sizeof(**internals));
+ if (*eth_dev == NULL) {
+ rte_free(data);
+ return -1;
+ }
/* now put it all together
* - store queue data in internals,
@@ -825,9 +825,8 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
- data->dev_private = *internals;
- data->port_id = (*eth_dev)->data->port_id;
- snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
+ *internals = (*eth_dev)->data->dev_private;
+ rte_memcpy(data, (*eth_dev)->data, sizeof(*data));
data->nb_rx_queues = (uint16_t)nb_rx_queues;
data->nb_tx_queues = (uint16_t)nb_tx_queues;
data->dev_link = pmd_link;
@@ -839,26 +838,17 @@ pmd_init_internals(const char *name, const unsigned int nb_rx_queues,
*/
(*eth_dev)->data = data;
(*eth_dev)->dev_ops = &ops;
- (*eth_dev)->driver = NULL;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- data->kdrv = RTE_KDRV_NONE;
- data->drv_name = drivername;
- data->numa_node = numa_node;
return 0;
-
-error:
- rte_free(data);
- rte_free(*internals);
-
- return -1;
}
static int
-eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
- const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues,
- const unsigned int nb_tx_queues, struct rte_kvargs *kvlist,
- struct pmd_internals **internals, struct rte_eth_dev **eth_dev)
+eth_from_pcaps_common(struct rte_vdev_device *vdev,
+ struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
+ struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct rte_kvargs *kvlist, struct pmd_internals **internals,
+ struct rte_eth_dev **eth_dev)
{
struct rte_kvargs_pair *pair = NULL;
unsigned int k_idx;
@@ -870,7 +860,7 @@ eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
if (tx_queues == NULL && nb_tx_queues > 0)
return -1;
- if (pmd_init_internals(name, nb_rx_queues, nb_tx_queues, internals,
+ if (pmd_init_internals(vdev, nb_rx_queues, nb_tx_queues, internals,
eth_dev) < 0)
return -1;
@@ -908,16 +898,17 @@ eth_from_pcaps_common(const char *name, struct pmd_devargs *rx_queues,
}
static int
-eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
- const unsigned int nb_rx_queues, struct pmd_devargs *tx_queues,
- const unsigned int nb_tx_queues, struct rte_kvargs *kvlist,
- int single_iface, unsigned int using_dumpers)
+eth_from_pcaps(struct rte_vdev_device *vdev,
+ struct pmd_devargs *rx_queues, const unsigned int nb_rx_queues,
+ struct pmd_devargs *tx_queues, const unsigned int nb_tx_queues,
+ struct rte_kvargs *kvlist, int single_iface,
+ unsigned int using_dumpers)
{
struct pmd_internals *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
int ret;
- ret = eth_from_pcaps_common(name, rx_queues, nb_rx_queues,
+ ret = eth_from_pcaps_common(vdev, rx_queues, nb_rx_queues,
tx_queues, nb_tx_queues, kvlist, &internals, &eth_dev);
if (ret < 0)
@@ -937,8 +928,9 @@ eth_from_pcaps(const char *name, struct pmd_devargs *rx_queues,
}
static int
-pmd_pcap_probe(const char *name, const char *params)
+pmd_pcap_probe(struct rte_vdev_device *dev)
{
+ const char *name;
unsigned int is_rx_pcap = 0, is_tx_pcap = 0;
struct rte_kvargs *kvlist;
struct pmd_devargs pcaps = {0};
@@ -946,13 +938,14 @@ pmd_pcap_probe(const char *name, const char *params)
int single_iface = 0;
int ret;
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name);
gettimeofday(&start_time, NULL);
start_cycles = rte_get_timer_cycles();
hz = rte_get_timer_hz();
- kvlist = rte_kvargs_parse(params, valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
return -1;
@@ -1026,7 +1019,7 @@ pmd_pcap_probe(const char *name, const char *params)
goto free_kvlist;
create_eth:
- ret = eth_from_pcaps(name, &pcaps, pcaps.num_of_queue, &dumpers,
+ ret = eth_from_pcaps(dev, &pcaps, pcaps.num_of_queue, &dumpers,
dumpers.num_of_queue, kvlist, single_iface, is_tx_pcap);
free_kvlist:
@@ -1036,18 +1029,18 @@ free_kvlist:
}
static int
-pmd_pcap_remove(const char *name)
+pmd_pcap_remove(struct rte_vdev_device *dev)
{
struct rte_eth_dev *eth_dev = NULL;
RTE_LOG(INFO, PMD, "Closing pcap ethdev on numa socket %u\n",
rte_socket_id());
- if (name == NULL)
+ if (!dev)
return -1;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocated(name);
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
if (eth_dev == NULL)
return -1;
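
A related change in both PMDs is the allocation path: rte_eth_vdev_allocate() replaces the separate rte_eth_dev_allocate()/rte_zmalloc_socket() pair for the private area, and the process-local copy of the ethdev data is seeded from the freshly allocated entry. A hedged sketch of that sequence (error handling trimmed, helper name illustrative):

#include <errno.h>

#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>

/* illustrative helper, not part of the patch */
static int
example_vdev_init(struct rte_vdev_device *vdev, size_t priv_size)
{
	struct rte_eth_dev_data *data;
	struct rte_eth_dev *eth_dev;

	/* process-local copy of the ethdev data, on the device's socket */
	data = rte_zmalloc_socket(rte_vdev_device_name(vdev), sizeof(*data),
				  0, vdev->device.numa_node);
	if (data == NULL)
		return -ENOMEM;

	/* allocates the ethdev entry and its private area in one call */
	eth_dev = rte_eth_vdev_allocate(vdev, priv_size);
	if (eth_dev == NULL) {
		rte_free(data);
		return -ENOMEM;
	}

	/* seed the local copy from the shared entry, then switch to it */
	rte_memcpy(data, eth_dev->data, sizeof(*data));
	eth_dev->data = data;
	return 0;
}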
diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile
index 29b443df..3323914c 100644
--- a/drivers/net/qede/Makefile
+++ b/drivers/net/qede/Makefile
@@ -56,6 +56,9 @@ endif
CFLAGS_BASE_DRIVER += -Wno-strict-prototypes
ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1)
CFLAGS_BASE_DRIVER += -Wno-shift-negative-value
+ifeq ($(shell test $(GCC_VERSION) -ge 70 && echo 1), 1)
+CFLAGS_BASE_DRIVER += -Wno-implicit-fallthrough
+endif
endif
else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
CFLAGS_BASE_DRIVER += -Wno-format-extra-args
@@ -76,33 +79,31 @@ endif
#
#
BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
-$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_BASE_DRIVER)))
+$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
#
# all source are stored in SRCS-y
#
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_l2.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dcbx.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sriov.c
-SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_cxt.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_l2.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sp_commands.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_fw_funcs.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_spq.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_init_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_mcp.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_int.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_dcbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += bcm_osal.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_sriov.c
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += ecore_vf.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c
SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c
-
-# dependent libs:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net
+SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_fdir.c
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c
index 28be9587..3f895cd4 100644
--- a/drivers/net/qede/base/bcm_osal.c
+++ b/drivers/net/qede/base/bcm_osal.c
@@ -98,9 +98,7 @@ inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit)
u32 nwords = 0;
OSAL_BUILD_BUG_ON(!limit);
nwords = (limit - 1) / OSAL_BITS_PER_UL + 1;
- for (i = 0; i < nwords; i++)
- if (~(addr[i] != 0))
- break;
+ for (i = 0; i < nwords && ~(addr[i]) == 0; i++);
return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]);
}
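
The qede_find_first_zero_bit() fix above replaces a broken early-break condition with a loop that skips words whose bits are all set and then adds the first-zero-bit offset within the word it stopped on. A standalone sketch of the same idea, using generic names and GCC/Clang's __builtin_ctzl() instead of the driver's qede_ffz() helper:

#include <limits.h>

/*
 * Return the index of the first zero bit in 'addr', or 'limit' if every
 * bit below 'limit' is set. Illustrative only.
 */
static unsigned int
example_find_first_zero_bit(const unsigned long *addr, unsigned int limit)
{
	const unsigned int bpw = sizeof(unsigned long) * CHAR_BIT;
	unsigned int nwords = (limit - 1) / bpw + 1;
	unsigned int i;

	for (i = 0; i < nwords && ~addr[i] == 0UL; i++)
		;	/* word is all ones, keep scanning */

	if (i == nwords)
		return limit;

	/* __builtin_ctzl(~w) is the position of the first zero bit in w */
	return i * bpw + (unsigned int)__builtin_ctzl(~addr[i]);
}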
diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 0b446f2e..32c9b251 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -18,6 +18,7 @@
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_ether.h>
+#include <rte_io.h>
/* Forward declaration */
struct ecore_dev;
@@ -88,8 +89,12 @@ typedef int bool;
#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0)
#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0)
#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0)
-#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0)
-#define OSAL_FREE(dev, memory) rte_free((void *)memory)
+#define OSAL_VZALLOC(dev, size) rte_zmalloc("qede", size, 0)
+#define OSAL_FREE(dev, memory) \
+ do { \
+ rte_free((void *)memory); \
+ memory = OSAL_NULL; \
+ } while (0)
#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory)
#define OSAL_MEM_ZERO(mem, size) bzero(mem, size)
#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size)
@@ -113,18 +118,18 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
/* HW reads/writes */
-#define DIRECT_REG_RD(_dev, _reg_addr) \
- (*((volatile u32 *) (_reg_addr)))
+#define DIRECT_REG_RD(_dev, _reg_addr) rte_read32(_reg_addr)
#define REG_RD(_p_hwfn, _reg_offset) \
DIRECT_REG_RD(_p_hwfn, \
((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset)))
-#define DIRECT_REG_WR16(_reg_addr, _val) \
- (*((volatile u16 *)(_reg_addr)) = _val)
+#define DIRECT_REG_WR16(_reg_addr, _val) rte_write16((_val), (_reg_addr))
+
+#define DIRECT_REG_WR(_dev, _reg_addr, _val) rte_write32((_val), (_reg_addr))
-#define DIRECT_REG_WR(_dev, _reg_addr, _val) \
- (*((volatile u32 *)(_reg_addr)) = _val)
+#define DIRECT_REG_WR_RELAXED(_dev, _reg_addr, _val) \
+ rte_write32_relaxed((_val), (_reg_addr))
#define REG_WR(_p_hwfn, _reg_offset, _val) \
DIRECT_REG_WR(NULL, \
@@ -134,9 +139,10 @@ void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *,
DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \
(_reg_offset)), (u16)_val)
-#define DOORBELL(_p_hwfn, _db_addr, _val) \
- DIRECT_REG_WR(_p_hwfn, \
- ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val)
+#define DOORBELL(_p_hwfn, _db_addr, _val) \
+ DIRECT_REG_WR_RELAXED((_p_hwfn), \
+ ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + \
+ (_db_addr)), (u32)_val)
/* Mutexes */
@@ -162,6 +168,7 @@ typedef pthread_mutex_t osal_mutex_t;
#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t))
#define OSAL_DPC_INIT(dpc, hwfn) nothing
#define OSAL_POLL_MODE_DPC(hwfn) nothing
+#define OSAL_DPC_SYNC(hwfn) nothing
/* Lists */
@@ -286,7 +293,8 @@ typedef struct osal_list_t {
#define OSAL_WMB(dev) rte_wmb()
#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing
-#define OSAL_BITS_PER_BYTE (8)
+#define OSAL_BIT(nr) (1UL << (nr))
+#define OSAL_BITS_PER_BYTE (8)
#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE)
#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1)
@@ -314,6 +322,8 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_BUILD_BUG_ON(cond) nothing
#define ETH_ALEN ETHER_ADDR_LEN
+#define OSAL_BITMAP_WEIGHT(bitmap, count) 0
+
#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn)
#define OSAL_DCBX_AEN(hwfn, mib_type) nothing
@@ -323,6 +333,7 @@ u32 qede_find_first_zero_bit(unsigned long *, u32);
#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0
#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0)
#define OSAL_PF_VF_MSG(hwfn, vfid) 0
+#define OSAL_PF_VF_MALICIOUS(hwfn, vfid) nothing
#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0
#define OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing
#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0
@@ -391,6 +402,7 @@ u32 qede_osal_log2(u32);
#define OSAL_STRCPY(dst, string) strcpy(dst, string)
#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len)
#define OSAL_STRCMP(str1, str2) strcmp(str1, str2)
+#define OSAL_STRTOUL(str, base, res) 0
#define OSAL_INLINE inline
#define OSAL_REG_ADDR(_p_hwfn, _offset) \
@@ -409,5 +421,7 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
qede_get_mcp_proto_stats(dev, type, stats)
#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
-
+#define OSAL_MFW_TLV_REQ(p_hwfn) (0)
+#define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
+#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
#endif /* __BCM_OSAL_H */
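
Two of the bcm_osal.h changes are worth calling out: OSAL_FREE now wraps rte_free() in a do { } while (0) block and clears the pointer afterwards, so a stale pointer cannot be freed twice, and the register accessors move from raw volatile dereferences to the rte_io helpers (rte_read32/rte_write32, with a relaxed variant for doorbells). A small sketch of the free-and-NULL pattern under an assumed context struct:

#include <rte_malloc.h>

/* same shape as the new OSAL_FREE, with an illustrative name */
#define EXAMPLE_FREE(ptr)			\
	do {					\
		rte_free((void *)(ptr));	\
		(ptr) = NULL;			\
	} while (0)

struct example_ctx {
	void *tbl;	/* hypothetical table owned by the context */
};

static void
example_ctx_clear(struct example_ctx *ctx)
{
	EXAMPLE_FREE(ctx->tbl);
	/* a second call is harmless: the pointer is now NULL and
	 * rte_free(NULL) is a no-op, so no double free can occur
	 */
	EXAMPLE_FREE(ctx->tbl);
}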
diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h
index b431c78d..cbcde227 100644
--- a/drivers/net/qede/base/common_hsi.h
+++ b/drivers/net/qede/base/common_hsi.h
@@ -78,8 +78,16 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 32
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
+/*
+ * Usually LL2 queues are opened in TX-RX pairs.
+ * There is a hard restriction on the number of RX queues (limited by Tstorm
+ * RAM) and TX counters (Pstorm RAM).
+ * The number of TX queues is almost unlimited.
+ * The constants differ so as to allow asymmetric LL2 connections.
+ */
+
+#define MAX_NUM_LL2_RX_QUEUES 48
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
/****************************************************************************/
@@ -89,7 +97,7 @@
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 10
+#define FW_MINOR_VERSION 18
#define FW_REVISION_VERSION 9
#define FW_ENGINEERING_VERSION 0
@@ -107,20 +115,21 @@
#define MAX_NUM_PFS (MAX_NUM_PFS_K2)
#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */
-#define MAX_NUM_VFS_K2 (192)
#define MAX_NUM_VFS_BB (120)
-#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define MAX_NUM_VFS_K2 (192)
+#define E4_MAX_NUM_VFS (MAX_NUM_VFS_K2)
+#define COMMON_MAX_NUM_VFS (240)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS_K2 (MAX_NUM_PFS_K2 + MAX_NUM_VFS_K2)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + E4_MAX_NUM_VFS)
/* in both BB and K2, the VF number starts from 16. so for arrays containing all
* possible PFs and VFs - we need a constant for this size
*/
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + E4_MAX_NUM_VFS)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -149,9 +158,10 @@
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)
/* CIDs */
-#define NUM_OF_CONNECTION_TYPES (8)
-#define NUM_OF_LCIDS (320)
-#define NUM_OF_LTIDS (320)
+#define E4_NUM_OF_CONNECTION_TYPES (8)
+#define NUM_OF_TASK_TYPES (8)
+#define NUM_OF_LCIDS (320)
+#define NUM_OF_LTIDS (320)
/* Clock values */
#define MASTER_CLK_FREQ_E4 (375e6)
@@ -176,6 +186,13 @@
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12)
#define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff)
+#define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0)
+#define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_TYPE (2)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_REGION (3)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_CID (4)
+#define CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE (5)
+
/*****************/
/* DQ CONSTANTS */
@@ -471,7 +488,6 @@
#define PXP_BAR_DQ 1
/* PTT and GTT */
-#define PXP_NUM_PF_WINDOWS 12
#define PXP_PER_PF_ENTRY_SIZE 8
#define PXP_NUM_GLOBAL_WINDOWS 243
#define PXP_GLOBAL_ENTRY_SIZE 4
@@ -496,6 +512,8 @@
#define PXP_PF_ME_OPAQUE_ADDR 0x1f8
#define PXP_PF_ME_CONCRETE_ADDR 0x1fc
+#define PXP_NUM_PF_WINDOWS 12
+
#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000
#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS
#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000
@@ -518,8 +536,6 @@
PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1)
/* PF BAR */
-/*#define PXP_BAR0_START_GRC 0x1000 */
-/*#define PXP_BAR0_GRC_LENGTH 0xBFF000 */
#define PXP_BAR0_START_GRC 0x0000
#define PXP_BAR0_GRC_LENGTH 0x1C00000
#define PXP_BAR0_END_GRC \
@@ -588,7 +604,7 @@
#define SDM_OP_GEN_TRIG_AGG_INT 2
#define SDM_OP_GEN_TRIG_LOADER 4
#define SDM_OP_GEN_TRIG_INDICATE_ERROR 6
-#define SDM_OP_GEN_TRIG_RELEASE_THREAD 7
+#define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9
/***********************************************************/
/* Completion types */
@@ -611,6 +627,7 @@
#define SDM_COMP_TYPE_RELEASE_THREAD 7
/* Write to local RAM as a completion */
#define SDM_COMP_TYPE_RAM 8
+#define SDM_COMP_TYPE_INC_ORDER_CNT 9 /* Applicable only for E4 */
/******************/
@@ -721,13 +738,10 @@ union event_ring_data {
u8 bytes[8] /* Byte Array */;
struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */;
struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */;
- /* Dedicated field for RoCE affiliated asynchronous error */;
- struct regpair roceHandle;
+ struct regpair roceHandle /* Dedicated field for RDMA data */;
struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */;
struct initial_cleanup_eqe_data vf_init_cleanup
/* VF Initial Cleanup data */;
-/* Host handle for the Async Completions */
- struct regpair iwarp_handle;
};
/* Event Ring Entry */
struct event_ring_entry {
@@ -768,6 +782,8 @@ enum protocol_type {
MAX_PROTOCOL_TYPE
};
+
+
/*
* Ustorm Queue Zone
*/
@@ -881,7 +897,7 @@ enum db_dest {
*/
enum db_dpm_type {
DPM_LEGACY /* Legacy DPM- to Xstorm RAM */,
- DPM_ROCE /* RoCE DPM- to NIG */,
+ DPM_RDMA /* RDMA DPM (only RoCE in E4) - to NIG */,
/* L2 DPM inline- to PBF, with packet data on doorbell */
DPM_L2_INLINE,
DPM_L2_BD /* L2 DPM with BD- to PBF, with TX BD data on doorbell */,
@@ -968,42 +984,42 @@ struct db_pwm_addr {
};
/*
- * Parameters to RoCE firmware, passed in EDPM doorbell
+ * Parameters to RDMA firmware, passed in EDPM doorbell
*/
-struct db_roce_dpm_params {
+struct db_rdma_dpm_params {
__le32 params;
/* Size in QWORD-s of the DPM burst */
-#define DB_ROCE_DPM_PARAMS_SIZE_MASK 0x3F
-#define DB_ROCE_DPM_PARAMS_SIZE_SHIFT 0
-/* Type of DPM transacation (DPM_ROCE) (use enum db_dpm_type) */
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_DPM_TYPE_SHIFT 6
-/* opcode for ROCE operation */
-#define DB_ROCE_DPM_PARAMS_OPCODE_MASK 0xFF
-#define DB_ROCE_DPM_PARAMS_OPCODE_SHIFT 8
+#define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F
+#define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0
+/* Type of DPM transaction (DPM_RDMA) (use enum db_dpm_type) */
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6
+/* opcode for RDMA operation */
+#define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF
+#define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8
/* the size of the WQE payload in bytes */
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
-#define DB_ROCE_DPM_PARAMS_WQE_SIZE_SHIFT 16
-#define DB_ROCE_DPM_PARAMS_RESERVED0_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_RESERVED0_SHIFT 27
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF
+#define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16
+#define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27
/* RoCE completion flag */
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
-#define DB_ROCE_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
-#define DB_ROCE_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
-#define DB_ROCE_DPM_PARAMS_S_FLG_SHIFT 29
-#define DB_ROCE_DPM_PARAMS_RESERVED1_MASK 0x3
-#define DB_ROCE_DPM_PARAMS_RESERVED1_SHIFT 30
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1
+#define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28
+#define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 /* RoCE S flag */
+#define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29
+#define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x3
+#define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30
};
/*
- * Structure for doorbell data, in ROCE DPM mode, for the first doorbell in a
+ * Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a
* DPM burst
*/
-struct db_roce_dpm_data {
+struct db_rdma_dpm_data {
__le16 icid /* internal CID */;
__le16 prod_val /* aggregated value to update */;
-/* parameters passed to RoCE firmware */
- struct db_roce_dpm_params params;
+/* parameters passed to RDMA firmware */
+ struct db_rdma_dpm_params params;
};
/* Igu interrupt command */
@@ -1136,6 +1152,68 @@ struct parsing_and_err_flags {
/*
+ * Parsing error flags bitmap.
+ */
+struct parsing_err_flags {
+ __le16 flags;
+/* MAC error indication */
+#define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_MAC_ERROR_SHIFT 0
+/* truncation error indication */
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TRUNC_ERROR_SHIFT 1
+/* packet too small indication */
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_PKT_TOO_SMALL_SHIFT 2
+/* Header Missing Tag */
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_MISSING_TAG_SHIFT 3
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_VER_MISMTCH_SHIFT 4
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_V4_HDR_LEN_TOO_SMALL_SHIFT 5
+/* set this error if: 1. total-len is smaller than hdr-len 2. total-ip-len
+ * indicates a number that is bigger than the real packet length 3. tunneling:
+ * total-ip-length of the outer header points to an offset that is smaller
+ * than the one pointed to by the total-ip-len of the inner hdr.
+ */
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_IP_BAD_TOTAL_LEN_SHIFT 6
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT 7
+/* from frame cracker output. for either TCP or UDP */
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_L4_IP_LEN_MISMTCH_SHIFT 8
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_MASK 0x1
+#define PARSING_ERR_FLAGS_ZERO_UDP_IP_V6_CHKSM_SHIFT 9
+/* checksum was calculated and its value isn't 0xffff, or the L4 checksum
+ * wasn't calculated for any reason (e.g. the udp/ipv4 checksum is 0).
+ */
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_INNER_L4_CHKSM_ERROR_SHIFT 10
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_MASK 0x1
+#define PARSING_ERR_FLAGS_ANY_HDR_ZERO_TTL_OR_HOP_LIM_SHIFT 11
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_MASK 0x1
+#define PARSING_ERR_FLAGS_NON_8021Q_TAG_EXISTS_IN_BOTH_HDRS_SHIFT 12
+/* set if geneve option size was over 32 byte */
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_MASK 0x1
+#define PARSING_ERR_FLAGS_GENEVE_OPTION_OVERSIZED_SHIFT 13
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_IP_V4_CHKSM_ERROR_SHIFT 14
+/* from frame cracker output */
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_MASK 0x1
+#define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15
+};
+
+
+/*
* Pb context
*/
struct pb_context {
@@ -1492,49 +1570,57 @@ struct tdif_task_context {
struct timers_context {
__le32 logical_client_0;
/* Expiration time of logical client 0 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC0_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED0_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED0_SHIFT 27
/* Valid bit of logical client 0 */
#define TIMERS_CONTEXT_VALIDLC0_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC0_SHIFT 28
/* Active bit of logical client 0 */
#define TIMERS_CONTEXT_ACTIVELC0_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC0_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED0_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED0_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
__le32 logical_client_1;
/* Expiration time of logical client 1 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC1_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED2_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED2_SHIFT 27
/* Valid bit of logical client 1 */
#define TIMERS_CONTEXT_VALIDLC1_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC1_SHIFT 28
/* Active bit of logical client 1 */
#define TIMERS_CONTEXT_ACTIVELC1_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC1_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED1_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED1_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED3_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED3_SHIFT 30
__le32 logical_client_2;
/* Expiration time of logical client 2 */
-#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_EXPIRATIONTIMELC2_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED4_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED4_SHIFT 27
/* Valid bit of logical client 2 */
#define TIMERS_CONTEXT_VALIDLC2_MASK 0x1
#define TIMERS_CONTEXT_VALIDLC2_SHIFT 28
/* Active bit of logical client 2 */
#define TIMERS_CONTEXT_ACTIVELC2_MASK 0x1
#define TIMERS_CONTEXT_ACTIVELC2_SHIFT 29
-#define TIMERS_CONTEXT_RESERVED2_MASK 0x3
-#define TIMERS_CONTEXT_RESERVED2_SHIFT 30
+#define TIMERS_CONTEXT_RESERVED5_MASK 0x3
+#define TIMERS_CONTEXT_RESERVED5_SHIFT 30
__le32 host_expiration_fields;
/* Expiration time on host (closest one) */
-#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0xFFFFFFF
+#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_MASK 0x7FFFFFF
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALUE_SHIFT 0
+#define TIMERS_CONTEXT_RESERVED6_MASK 0x1
+#define TIMERS_CONTEXT_RESERVED6_SHIFT 27
/* Valid bit of host expiration */
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_MASK 0x1
#define TIMERS_CONTEXT_HOSTEXPRIRATIONVALID_SHIFT 28
-#define TIMERS_CONTEXT_RESERVED3_MASK 0x7
-#define TIMERS_CONTEXT_RESERVED3_SHIFT 29
+#define TIMERS_CONTEXT_RESERVED7_MASK 0x7
+#define TIMERS_CONTEXT_RESERVED7_SHIFT 29
};
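
The new parsing_err_flags bitmap above uses the usual mask/shift pair layout over a little-endian 16-bit word; a flag is tested by shifting and masking after byte-order conversion. A hedged sketch with one flag from the patch (the helper name is illustrative):

#include <stdbool.h>
#include <stdint.h>

#include <rte_byteorder.h>

#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK	0x1
#define PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT	7

/* illustrative helper: test the IPv4 checksum error bit */
static bool
example_ipv4_csum_error(uint16_t le_flags)
{
	uint16_t flags = rte_le_to_cpu_16(le_flags);

	return (flags >> PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_SHIFT) &
		PARSING_ERR_FLAGS_IP_V4_CHKSM_ERROR_MASK;
}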
diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index 907b35b9..80b11a4c 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -28,9 +28,21 @@
#include "ecore_proto_if.h"
#include "mcp_public.h"
-#define MAX_HWFNS_PER_DEVICE (4)
+#define ECORE_MAJOR_VERSION 8
+#define ECORE_MINOR_VERSION 18
+#define ECORE_REVISION_VERSION 7
+#define ECORE_ENGINEERING_VERSION 0
+
+#define ECORE_VERSION \
+ ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
+ (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
+#define MAX_HWFNS_PER_DEVICE 2
#define NAME_SIZE 128 /* @DPDK */
-#define VER_SIZE 16
#define ECORE_WFQ_UNIT 100
#include "../qede_logs.h" /* @DPDK */
@@ -80,13 +92,22 @@ enum ecore_nvm_cmd {
#define SET_FIELD(value, name, flag) \
do { \
(value) &= ~(name##_MASK << name##_SHIFT); \
- (value) |= (((u64)flag) << (name##_SHIFT)); \
+ (value) |= ((((u64)flag) & (u64)name##_MASK) << (name##_SHIFT));\
} while (0)
#define GET_FIELD(value, name) \
(((value) >> (name##_SHIFT)) & name##_MASK)
#endif
+#define ECORE_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define ECORE_MFW_SET_FIELD(name, field, value) \
+do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK)); \
+} while (0)
+
static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS)
{
u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
@@ -158,8 +179,8 @@ enum DP_MODULE {
ECORE_MSG_CXT = 0x800000,
ECORE_MSG_LL2 = 0x1000000,
ECORE_MSG_ILT = 0x2000000,
- ECORE_MSG_RDMA = 0x4000000,
- ECORE_MSG_DEBUG = 0x8000000,
+ ECORE_MSG_RDMA = 0x4000000,
+ ECORE_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
#endif
@@ -179,6 +200,7 @@ struct ecore_cxt_mngr;
struct ecore_dma_mem;
struct ecore_sb_sp_info;
struct ecore_ll2_info;
+struct ecore_l2_info;
struct ecore_igu_info;
struct ecore_mcp_info;
struct ecore_dcbx_info;
@@ -205,33 +227,29 @@ enum ecore_tunn_clss {
MAX_ECORE_TUNN_CLSS,
};
-struct ecore_tunn_start_params {
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+struct ecore_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum ecore_tunn_clss tun_cls;
+};
+
+struct ecore_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
};
-struct ecore_tunn_update_params {
- unsigned long tunn_mode_update_mask;
- unsigned long tunn_mode;
- u16 vxlan_udp_port;
- u16 geneve_udp_port;
- u8 update_rx_pf_clss;
- u8 update_tx_pf_clss;
- u8 update_vxlan_udp_port;
- u8 update_geneve_udp_port;
- u8 tunn_clss_vxlan;
- u8 tunn_clss_l2geneve;
- u8 tunn_clss_ipgeneve;
- u8 tunn_clss_l2gre;
- u8 tunn_clss_ipgre;
+struct ecore_tunnel_info {
+ struct ecore_tunn_update_type vxlan;
+ struct ecore_tunn_update_type l2_geneve;
+ struct ecore_tunn_update_type ip_geneve;
+ struct ecore_tunn_update_type l2_gre;
+ struct ecore_tunn_update_type ip_gre;
+
+ struct ecore_tunn_update_udp_port vxlan_port;
+ struct ecore_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
};
/* The PCI personality is not quite synonymous to protocol ID:
@@ -243,7 +261,8 @@ enum ecore_pci_personality {
ECORE_PCI_FCOE,
ECORE_PCI_ISCSI,
ECORE_PCI_ETH_ROCE,
- ECORE_PCI_IWARP,
+ ECORE_PCI_ETH_IWARP,
+ ECORE_PCI_ETH_RDMA,
ECORE_PCI_DEFAULT /* default in shmem */
};
@@ -273,6 +292,7 @@ enum ecore_resources {
ECORE_LL2_QUEUE,
ECORE_CMDQS_CQS,
ECORE_RDMA_STATS_QUEUE,
+ ECORE_BDQ,
ECORE_MAX_RESC, /* must be last */
};
@@ -288,6 +308,7 @@ enum ecore_feature {
ECORE_RDMA_CNQ,
ECORE_ISCSI_CQ,
ECORE_FCOE_CQ,
+ ECORE_VF_L2_QUE,
ECORE_MAX_FEATURES,
};
@@ -302,6 +323,7 @@ enum ecore_port_mode {
ECORE_PORT_MODE_DE_2X25G,
ECORE_PORT_MODE_DE_1X25G,
ECORE_PORT_MODE_DE_4X25G,
+ ECORE_PORT_MODE_DE_2X10G,
};
enum ecore_dev_cap {
@@ -326,6 +348,19 @@ enum ecore_hw_err_type {
struct ecore_hw_info {
/* PCI personality */
enum ecore_pci_personality personality;
+#define ECORE_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == ECORE_PCI_ETH_RDMA)
+#define ECORE_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == ECORE_PCI_ETH || \
+ ECORE_IS_RDMA_PERSONALITY(dev))
/* Resource Allocation scheme results */
u32 resc_start[ECORE_MAX_RESC];
@@ -347,9 +382,6 @@ struct ecore_hw_info {
u8 num_active_tc;
- /* Traffic class used for tcp out of order traffic */
- u8 ooo_tc;
-
/* The traffic class used by PF for it's offloaded protocol */
u8 offload_tc;
@@ -372,16 +404,11 @@ struct ecore_hw_info {
u32 port_mode;
u32 hw_mode;
unsigned long device_capabilities;
-};
-struct ecore_hw_cid_data {
- u32 cid;
- bool b_cid_allocated;
- u8 vfid; /* 1-based; 0 signals this is for a PF */
+ /* Default DCBX mode */
+ u8 dcbx_mode;
- /* Additional identifiers */
- u16 opaque_fid;
- u8 vport_id;
+ u16 mtu;
};
/* maximun size of read/write commands (HW limit) */
@@ -424,15 +451,18 @@ struct ecore_qm_info {
struct init_qm_port_params *qm_port_params;
u16 start_pq;
u8 start_vport;
- u8 pure_lb_pq;
- u8 offload_pq;
- u8 pure_ack_pq;
- u8 ooo_pq;
- u8 vf_queues_offset;
+ u16 pure_lb_pq;
+ u16 offload_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
u16 num_pqs;
u16 num_vf_pqs;
u8 num_vports;
u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
bool pf_rl_en;
bool pf_wfq_en;
bool vport_rl_en;
@@ -472,7 +502,7 @@ struct ecore_hwfn {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
- void *dp_ctx;
+ void *dp_ctx;
bool first_on_engine;
bool hw_init_done;
@@ -527,8 +557,8 @@ struct ecore_hwfn {
u32 rdma_prs_search_reg;
/* Array of sb_info of all status blocks */
- struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
- u16 num_sbs;
+ struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
+ u16 num_sbs;
struct ecore_cxt_mngr *p_cxt_mngr;
@@ -544,9 +574,6 @@ struct ecore_hwfn {
struct ecore_mcp_info *mcp_info;
struct ecore_dcbx_info *p_dcbx_info;
- struct ecore_hw_cid_data *p_tx_cids;
- struct ecore_hw_cid_data *p_rx_cids;
-
struct ecore_dmae_info dmae_info;
/* QM init */
@@ -572,6 +599,12 @@ struct ecore_hwfn {
/* If one of the following is set then EDPM shouldn't be used */
u8 dcbx_no_edpm;
u8 db_bar_no_edpm;
+
+ /* L2-related */
+ struct ecore_l2_info *p_l2_info;
+
+ /* @DPDK */
+ struct ecore_ptt *p_arfs_ptt;
};
#ifndef __EXTRACT__LINUX__
@@ -603,7 +636,7 @@ struct ecore_dev {
u32 dp_module;
u8 dp_level;
char name[NAME_SIZE];
- void *dp_ctx;
+ void *dp_ctx;
u8 type;
#define ECORE_DEV_TYPE_BB (0 << 0)
@@ -620,6 +653,10 @@ struct ecore_dev {
#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH)
#define ECORE_IS_K2(dev) ECORE_IS_AH(dev)
+#define ECORE_DEV_ID_MASK 0xff00
+#define ECORE_DEV_ID_MASK_BB 0x1600
+#define ECORE_DEV_ID_MASK_AH 0x8000
+
u16 vendor_id;
u16 device_id;
@@ -679,7 +716,7 @@ struct ecore_dev {
int pcie_width;
int pcie_speed;
- u8 ver_str[NAME_SIZE]; /* @DPDK */
+
/* Add MF related configuration */
u8 mcp_rev;
u8 boot_mode;
@@ -711,10 +748,7 @@ struct ecore_dev {
/* SRIOV */
struct ecore_hw_sriov_info *p_iov_info;
#define IS_ECORE_SRIOV(p_dev) (!!(p_dev)->p_iov_info)
- bool b_hw_channel;
-
- unsigned long tunn_mode;
-
+ struct ecore_tunnel_info tunnel;
bool b_is_vf;
u32 drv_type;
@@ -766,15 +800,6 @@ struct ecore_dev {
#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
-#ifndef REAL_ASIC_ONLY
-#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \
- (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \
- (ECORE_PATH_ID(p_hwfn) == 1) && \
- ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \
- (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \
- (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G)))
-#endif
-
/**
* @brief ecore_concrete_to_sw_fid - get the sw function id from
* the concrete value.
@@ -783,8 +808,8 @@ struct ecore_dev {
*
* @return OSAL_INLINE u8
*/
-static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
- u32 concrete_fid)
+static OSAL_INLINE u8
+ecore_concrete_to_sw_fid(__rte_unused struct ecore_dev *p_dev, u32 concrete_fid)
{
u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
@@ -800,7 +825,7 @@ static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev,
}
#define PURE_LB_TC 8
-#define OOO_LB_TC 9
+#define PKT_LB_TC 9
int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate);
void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
@@ -811,7 +836,33 @@ int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw);
void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
int ecore_device_num_engines(struct ecore_dev *p_dev);
int ecore_device_num_ports(struct ecore_dev *p_dev);
+void ecore_set_fw_mac_addr(__le16 *fw_msb, __le16 *fw_mid, __le16 *fw_lsb,
+ u8 *mac);
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (1 << 0)
+#define PQ_FLAGS_MCOS (1 << 1)
+#define PQ_FLAGS_LB (1 << 2)
+#define PQ_FLAGS_OOO (1 << 3)
+#define PQ_FLAGS_ACK (1 << 4)
+#define PQ_FLAGS_OFLD (1 << 5)
+#define PQ_FLAGS_VFS (1 << 6)
+
+/* physical queue index for cm context initialization */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags);
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc);
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf);
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 qpid);
+
+/* amount of resources used in qm init */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn);
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn);
#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0])
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id);
+
#endif /* __ECORE_H */
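
The SET_FIELD change in ecore.h masks the incoming value before shifting it into place, so a value wider than the field can no longer spill into neighbouring bits. A minimal, self-contained illustration with a hypothetical 4-bit field (the field name and widths below are made up, not taken from the driver headers):

#include <stdint.h>
#include <stdio.h>

#define EX_FIELD_MASK	0xF	/* hypothetical 4-bit field */
#define EX_FIELD_SHIFT	8

/* same shape as the patched macros, with u64 spelled as uint64_t */
#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((uint64_t)name##_MASK << name##_SHIFT);	\
		(value) |= (((uint64_t)(flag) & (uint64_t)name##_MASK)	\
			    << name##_SHIFT);				\
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

int main(void)
{
	uint32_t reg = 0xABCD0000;

	/* 0x1F is wider than the 4-bit field; only 0xF lands in bits 8..11 */
	SET_FIELD(reg, EX_FIELD, 0x1F);
	printf("reg=0x%08x field=0x%x\n", reg, GET_FIELD(reg, EX_FIELD));
	return 0;
}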
diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h
index 9ad1874f..ba272a91 100644
--- a/drivers/net/qede/base/ecore_chain.h
+++ b/drivers/net/qede/base/ecore_chain.h
@@ -54,21 +54,9 @@ struct ecore_chain_pbl_u32 {
u32 cons_page_idx;
};
-struct ecore_chain_pbl {
- /* Base address of a pre-allocated buffer for pbl */
- dma_addr_t p_phys_table;
- void *p_virt_table;
-
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
- */
- void **pp_virt_addr_tbl;
-
- /* Index to current used page by producer/consumer */
- union {
- struct ecore_chain_pbl_u16 pbl16;
- struct ecore_chain_pbl_u32 pbl32;
- } u;
+struct ecore_chain_ext_pbl {
+ dma_addr_t p_pbl_phys;
+ void *p_pbl_virt;
};
struct ecore_chain_u16 {
@@ -84,40 +72,75 @@ struct ecore_chain_u32 {
};
struct ecore_chain {
- /* Address of first page of the chain */
- void *p_virt_addr;
- dma_addr_t p_phys_addr;
-
+ /* fastpath portion of the chain - required for commands such
+ * as produce / consume.
+ */
/* Point to next element to produce/consume */
void *p_prod_elem;
void *p_cons_elem;
- enum ecore_chain_mode mode;
- enum ecore_chain_use_mode intended_use;
+ /* Fastpath portions of the PBL [if exists] */
+
+ struct {
+ /* Table for keeping the virtual addresses of the chain pages,
+ * respectively to the physical addresses in the pbl table.
+ */
+ void **pp_virt_addr_tbl;
+
+ union {
+ struct ecore_chain_pbl_u16 u16;
+ struct ecore_chain_pbl_u32 u32;
+ } c;
+ } pbl;
- enum ecore_chain_cnt_type cnt_type;
union {
struct ecore_chain_u16 chain16;
struct ecore_chain_u32 chain32;
} u;
- u32 page_cnt;
+ /* Capacity counts only usable elements */
+ u32 capacity;
+ u32 page_cnt;
- /* Number of elements - capacity is for usable elements only,
- * while size will contain total number of elements [for entire chain].
+ /* A u8 would suffice for mode, but the enum saves us a lot of headaches
+ * on castings & defaults.
*/
- u32 capacity;
- u32 size;
+ enum ecore_chain_mode mode;
/* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
- u16 elem_unusable;
- u16 usable_per_page;
u16 elem_size;
u16 next_page_mask;
+ u16 usable_per_page;
+ u8 elem_unusable;
- struct ecore_chain_pbl pbl;
+ u8 cnt_type;
+
+ /* Slowpath of the chain - required for initialization and destruction,
+ * but isn't involved in regular functionality.
+ */
+
+ /* Base address of a pre-allocated buffer for pbl */
+ struct {
+ dma_addr_t p_phys_table;
+ void *p_virt_table;
+ } pbl_sp;
+
+ /* Address of first page of the chain - the address is required
+ * for fastpath operation [consume/produce] but only for the SINGLE
+ * flavour which isn't considered fastpath [== SPQ].
+ */
+ void *p_virt_addr;
+ dma_addr_t p_phys_addr;
+
+ /* Total number of elements [for entire chain] */
+ u32 size;
+
+ u8 intended_use;
+
+ /* TBD - do we really need this? Couldn't find usage for it */
+ bool b_external_pbl;
void *dp_ctx;
};
@@ -128,8 +151,8 @@ struct ecore_chain {
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \
- (1 + ((sizeof(struct ecore_chain_next) - 1) / \
- (elem_size))) : 0)
+ (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) / \
+ (elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \
@@ -238,7 +261,7 @@ u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
}
static OSAL_INLINE
-u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
+u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
return p_chain->elem_unusable;
}
@@ -256,7 +279,7 @@ static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
static OSAL_INLINE
dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
{
- return p_chain->pbl.p_phys_table;
+ return p_chain->pbl_sp.p_phys_table;
}
/**
@@ -281,9 +304,9 @@ ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
p_next = (struct ecore_chain_next *)(*p_next_elem);
*p_next_elem = p_next->next_virt;
if (is_chain_u16(p_chain))
- *(u16 *)idx_to_inc += p_chain->elem_unusable;
+ *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
else
- *(u32 *)idx_to_inc += p_chain->elem_unusable;
+ *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
break;
case ECORE_CHAIN_MODE_SINGLE:
*p_next_elem = p_chain->p_virt_addr;
@@ -384,7 +407,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -393,7 +416,7 @@ static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx;
- p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
+ p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx);
}
@@ -458,7 +481,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -467,7 +490,7 @@ static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx;
- p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
+ p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx);
}
@@ -511,25 +534,26 @@ static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) {
- p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
- p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
+ p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
} else {
- p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
- p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
+ p_chain->pbl.c.u32.prod_page_idx = reset_val;
+ p_chain->pbl.c.u32.cons_page_idx = reset_val;
}
}
switch (p_chain->intended_use) {
- case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
- case ECORE_CHAIN_USE_TO_PRODUCE:
- /* Do nothing */
- break;
-
case ECORE_CHAIN_USE_TO_CONSUME:
- /* produce empty elements */
- for (i = 0; i < p_chain->capacity; i++)
+ /* produce empty elements */
+ for (i = 0; i < p_chain->capacity; i++)
ecore_chain_recycle_consumed(p_chain);
- break;
+ break;
+
+ case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
+ case ECORE_CHAIN_USE_TO_PRODUCE:
+ default:
+ /* Do nothing */
+ break;
}
}
@@ -556,9 +580,9 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->p_virt_addr = OSAL_NULL;
p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size;
- p_chain->intended_use = intended_use;
+ p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode;
- p_chain->cnt_type = cnt_type;
+ p_chain->cnt_type = (u8)cnt_type;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
@@ -570,9 +594,9 @@ ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
p_chain->page_cnt = page_cnt;
p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt;
-
- p_chain->pbl.p_phys_table = 0;
- p_chain->pbl.p_virt_table = OSAL_NULL;
+ p_chain->b_external_pbl = false;
+ p_chain->pbl_sp.p_phys_table = 0;
+ p_chain->pbl_sp.p_virt_table = OSAL_NULL;
p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;
p_chain->dp_ctx = dp_ctx;
@@ -616,8 +640,8 @@ static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl)
{
- p_chain->pbl.p_phys_table = p_phys_pbl;
- p_chain->pbl.p_virt_table = p_virt_pbl;
+ p_chain->pbl_sp.p_phys_table = p_phys_pbl;
+ p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
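
The reworked ecore_chain keeps the per-page element accounting (elem_per_page, usable_per_page, elem_unusable) while splitting the structure into fastpath and slowpath portions. A rough sketch of how those counts relate in NEXT_PTR mode, assuming a 4 KB page and a made-up 16-byte next-page element size in place of sizeof(struct ecore_chain_next):

#include <stdio.h>

#define EX_PAGE_SIZE		4096U
#define EX_NEXT_PTR_SIZE	16U	/* assumed next-page element size */

int main(void)
{
	unsigned int elem_size = 64;	/* hypothetical element size */
	unsigned int page_cnt = 8;
	unsigned int elem_per_page = EX_PAGE_SIZE / elem_size;
	/* NEXT_PTR mode burns the tail of each page on the page link */
	unsigned int unusable = 1 + (EX_NEXT_PTR_SIZE - 1) / elem_size;
	unsigned int usable_per_page = elem_per_page - unusable;

	printf("capacity=%u size=%u\n",
	       usable_per_page * page_cnt,	/* usable elements only */
	       elem_per_page * page_cnt);	/* all elements, incl. links */
	return 0;
}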
diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c
index 3dd953d9..688118bb 100644
--- a/drivers/net/qede/base/ecore_cxt.c
+++ b/drivers/net/qede/base/ecore_cxt.c
@@ -8,6 +8,7 @@
#include "bcm_osal.h"
#include "reg_addr.h"
+#include "common_hsi.h"
#include "ecore_hsi_common.h"
#include "ecore_hsi_eth.h"
#include "ecore_rt_defs.h"
@@ -19,6 +20,7 @@
#include "ecore_hw.h"
#include "ecore_dev_api.h"
#include "ecore_sriov.h"
+#include "ecore_mcp.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
@@ -100,7 +102,6 @@ struct ecore_tid_seg {
struct ecore_conn_type_cfg {
u32 cid_count;
- u32 cid_start;
u32 cids_per_vf;
struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
};
@@ -191,11 +192,11 @@ struct ecore_cxt_mngr {
*/
u32 vf_count;
- /* total number of SRQ's for this hwfn */
- u32 srq_count;
-
/* Acquired CIDs */
struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
+ /* TBD - do we want this allocated to reserve space? */
+ struct ecore_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
/* ILT shadow table */
struct ecore_dma_mem *ilt_shadow;
@@ -209,10 +210,29 @@ struct ecore_cxt_mngr {
u32 t2_num_pages;
u64 first_free;
u64 last_free;
+
+ /* The infrastructure originally was very generic and context/task
+ * oriented - per connection-type we would set how many of those
+ * are needed, and later, when determining how much memory we need
+ * for a given block, we'd iterate over all the relevant
+ * connection-types.
+ * But since then we've had some additional resources, some of which
+ * require memory which is independent of the general context/task
+ * scheme. We add those here explicitly per-feature.
+ */
+
+ /* total number of SRQ's for this hwfn */
+ u32 srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ /* TODO - VF arfs filters ? */
};
/* check if resources/configuration is required according to protocol type */
-static OSAL_INLINE bool src_proto(enum protocol_type type)
+static OSAL_INLINE bool src_proto(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type)
{
return type == PROTOCOLID_TOE;
}
@@ -250,18 +270,22 @@ struct ecore_src_iids {
u32 per_vf_cids;
};
-static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
+static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_cxt_mngr *p_mngr,
struct ecore_src_iids *iids)
{
u32 i;
for (i = 0; i < MAX_CONN_TYPES; i++) {
- if (!src_proto(i))
+ if (!src_proto(p_hwfn, i))
continue;
iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
}
+
+ /* Add L2 filtering filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
}
/* counts the iids for the Timers block configuration */
@@ -276,14 +300,24 @@ struct ecore_tm_iids {
static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
struct ecore_tm_iids *iids)
{
+ bool tm_vf_required = false;
+ bool tm_required = false;
u32 i, j;
for (i = 0; i < MAX_CONN_TYPES; i++) {
struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
- if (tm_cid_proto(i)) {
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
iids->pf_cids += p_cfg->cid_count;
- iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
}
if (tm_tid_proto(i)) {
@@ -313,7 +347,8 @@ static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
}
}
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids)
+static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_iids *iids)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct ecore_tid_seg *segs;
@@ -671,7 +706,7 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
/* SRC */
p_cli = &p_mngr->clients[ILT_CLI_SRC];
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
/* Both the PF and VFs searcher connections are stored in the per PF
* database. Thus sum the PF searcher cids and all the VFs searcher
@@ -718,12 +753,11 @@ enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++) {
ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_TM);
}
-
- p_cli->vf_total_lines = curr_line - p_blk->start_line;
}
/* TSDM (SRQ CONTEXT) */
@@ -766,7 +800,6 @@ static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
p_mngr->t2[i].size);
OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
- p_mngr->t2 = OSAL_NULL;
}
static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
@@ -787,7 +820,7 @@ static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
if (!p_src->active)
return ECORE_SUCCESS;
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
total_size = conn_num * sizeof(struct src_ent);
@@ -1006,44 +1039,75 @@ ilt_shadow_fail:
static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 type;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
p_mngr->acquired[type].max_count = 0;
p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ OSAL_FREE(p_hwfn->p_dev,
+ p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
}
}
+static enum _ecore_status_t
+ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
+ u32 cid_start, u32 cid_count,
+ struct ecore_cid_acquired_map *p_map)
+{
+ u32 size;
+
+ if (!cid_count)
+ return ECORE_SUCCESS;
+
+ size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
+ p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
+ if (p_map->cid_map == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 start_cid = 0;
- u32 type;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 size;
+ struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct ecore_cid_acquired_map *p_map;
- if (cid_cnt == 0)
- continue;
-
- size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD);
- p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev,
- GFP_KERNEL, size);
- if (!p_mngr->acquired[type].cid_map)
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
goto cid_map_fail;
- p_mngr->acquired[type].max_count = cid_cnt;
- p_mngr->acquired[type].start_cid = start_cid;
-
- p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+ /* Handle VF maps */
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (ecore_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf,
+ p_map))
+ goto cid_map_fail;
+ }
- DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
- "Type %08x start: %08x count %08x\n",
- type, p_mngr->acquired[type].start_cid,
- p_mngr->acquired[type].max_count);
- start_cid += cid_cnt;
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
}
return ECORE_SUCCESS;
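
A minimal, self-contained sketch of the bitmap sizing used by ecore_cid_map_alloc_single() above. MAP_WORD_SIZE and BITS_PER_MAP_WORD are assumed here to be the size of an unsigned long in bytes and bits respectively; the driver's real definitions live in its own headers.

#include <stdio.h>

#define SKETCH_MAP_WORD_SIZE      sizeof(unsigned long)
#define SKETCH_BITS_PER_MAP_WORD  (8 * SKETCH_MAP_WORD_SIZE)
#define SKETCH_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Bytes needed for a bitmap tracking 'cid_count' connection IDs. */
static size_t sketch_cid_map_size(unsigned int cid_count)
{
	return SKETCH_MAP_WORD_SIZE *
	       SKETCH_DIV_ROUND_UP(cid_count, SKETCH_BITS_PER_MAP_WORD);
}

int main(void)
{
	/* e.g. a PF with 64 ETH cids and 32 cids per VF (illustrative counts) */
	printf("PF map: %zu bytes, per-VF map: %zu bytes\n",
	       sketch_cid_map_size(64), sketch_cid_map_size(32));
	return 0;
}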
@@ -1155,27 +1219,41 @@ void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
ecore_cid_map_free(p_hwfn);
ecore_cxt_src_t2_free(p_hwfn);
ecore_ilt_shadow_free(p_hwfn);
- OSAL_MUTEX_DEALLOC(&p_mngr->mutex);
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
-
- p_hwfn->p_cxt_mngr = OSAL_NULL;
}
void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
+ struct ecore_conn_type_cfg *p_cfg;
int type;
+ u32 len;
/* Reset acquired cids */
for (type = 0; type < MAX_CONN_TYPES; type++) {
- u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
- u32 i;
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
- if (cid_cnt == 0)
+ if (!p_cfg->cids_per_vf)
continue;
- for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++)
- p_mngr->acquired[type].cid_map[i] = 0;
+ for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ len = DIV_ROUND_UP(p_map->max_count,
+ BITS_PER_MAP_WORD) *
+ MAP_WORD_SIZE;
+ OSAL_MEM_ZERO(p_map->cid_map, len);
+ }
}
}
@@ -1366,18 +1444,10 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn)
}
/* CM PF */
-static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
+void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
{
- union ecore_qm_pq_params pq_params;
- u16 pq;
-
- /* XCM pure-LB queue */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
-
- return ECORE_SUCCESS;
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
}
/* DQ PF */
@@ -1569,7 +1639,7 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
struct ecore_src_iids src_iids;
OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
- ecore_cxt_src_iids(p_mngr, &src_iids);
+ ecore_cxt_src_iids(p_hwfn, p_mngr, &src_iids);
conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
if (!conn_num)
return;
@@ -1585,6 +1655,9 @@ static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
p_hwfn->p_cxt_mngr->first_free);
STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
p_hwfn->p_cxt_mngr->last_free);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
+ "Configured SEARCHER for 0x%08x connections\n",
+ conn_num);
}
/* Timers PF */
@@ -1724,93 +1797,150 @@ void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn)
ecore_prs_init_pf(p_hwfn);
}
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
- enum protocol_type type, u32 *p_cid)
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map;
u32 rel_cid;
- if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+ if (type >= MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
+ return ECORE_INVAL;
+ }
+
+ if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
+ return ECORE_INVAL;
+ }
+
+ /* Determine the right map to take this CID from */
+ if (vfid == ECORE_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (p_map->cid_map == OSAL_NULL) {
DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
return ECORE_INVAL;
}
- rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map,
- p_mngr->acquired[type].max_count);
+ rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
+ p_map->max_count);
- if (rel_cid >= p_mngr->acquired[type].max_count) {
+ if (rel_cid >= p_map->max_count) {
DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
type);
return ECORE_NORESOURCES;
}
- OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+ OSAL_SET_BIT(rel_cid, p_map->cid_map);
- *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid)
+{
+ return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
+}
+
static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
- u32 cid, enum protocol_type *p_type)
+ u32 cid, u8 vfid,
+ enum protocol_type *p_type,
+ struct ecore_cid_acquired_map **pp_map)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_cid_acquired_map *p_map;
- enum protocol_type p;
u32 rel_cid;
/* Iterate over protocols and find matching cid range */
- for (p = 0; p < MAX_CONN_TYPES; p++) {
- p_map = &p_mngr->acquired[p];
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == ECORE_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
- if (!p_map->cid_map)
+ if (!((*pp_map)->cid_map))
continue;
- if (cid >= p_map->start_cid &&
- cid < p_map->start_cid + p_map->max_count) {
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
break;
}
}
- *p_type = p;
-
- if (p == MAX_CONN_TYPES) {
- DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid);
- return false;
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
}
- rel_cid = cid - p_map->start_cid;
- if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) {
- DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid);
- return false;
+
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, true,
+ "CID %d [vifd %02x] not acquired", cid, vfid);
+ goto fail;
}
+
return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = OSAL_NULL;
+ return false;
}
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
enum protocol_type type;
bool b_acquired;
u32 rel_cid;
+ if (vfid != ECORE_CXT_PF_CID && vfid > COMMON_MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn, true,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
/* Test acquired and find matching per-protocol map */
- b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type);
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
if (!b_acquired)
return;
- rel_cid = cid - p_mngr->acquired[type].start_cid;
- OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map);
+ rel_cid = cid - p_map->start_cid;
+ OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+ _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
}
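
Illustrative sketch only (not the driver code): the acquire/release pair above boils down to a first-fit bitmap allocator kept per {protocol type, PF or VF}. The driver uses the OSAL_* bit helpers; plain C stands in for them here, with a 64-entry map as an assumed size.

#include <stdint.h>
#include <stdio.h>

struct sketch_cid_map {
	uint32_t start_cid;   /* first absolute CID covered by this map */
	uint32_t max_count;   /* number of CIDs tracked (<= 64 in this sketch) */
	uint64_t bits;        /* one bit per CID */
};

/* Acquire: find the first zero bit, mark it, return the absolute CID. */
static int sketch_acquire_cid(struct sketch_cid_map *m, uint32_t *p_cid)
{
	uint32_t rel;

	for (rel = 0; rel < m->max_count; rel++)
		if (!(m->bits & (1ULL << rel)))
			break;
	if (rel == m->max_count)
		return -1;              /* no CID available */
	m->bits |= 1ULL << rel;
	*p_cid = m->start_cid + rel;
	return 0;
}

/* Release: clear the bit for 'cid', provided it falls inside this map. */
static void sketch_release_cid(struct sketch_cid_map *m, uint32_t cid)
{
	if (cid >= m->start_cid && cid < m->start_cid + m->max_count)
		m->bits &= ~(1ULL << (cid - m->start_cid));
}

int main(void)
{
	struct sketch_cid_map vf0 = { .start_cid = 0x100, .max_count = 32 };
	uint32_t cid;

	if (!sketch_acquire_cid(&vf0, &cid))
		printf("acquired cid 0x%x\n", cid);
	sketch_release_cid(&vf0, cid);
	return 0;
}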
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info)
{
struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct ecore_cid_acquired_map *p_map = OSAL_NULL;
u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
enum protocol_type type;
bool b_acquired;
/* Test acquired and find matching per-protocol map */
- b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+ b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ ECORE_CXT_PF_CID,
+ &type, &p_map);
if (!b_acquired)
return ECORE_INVAL;
@@ -1839,7 +1969,7 @@ enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
+static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
{
struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
@@ -1866,10 +1996,15 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
struct ecore_eth_pf_params *p_params =
&p_hwfn->pf_params.eth_pf_params;
- ecore_cxt_set_proto_cid_count(p_hwfn,
- PROTOCOLID_ETH,
- p_params->num_cons, 1); /* FIXME VF count... */
-
+ /* TODO - we probably want to add VF number to the PF
+ * params;
+ * As of now, allocates 16 * 2 per-VF [to retain regular
+ * functionality].
+ */
+ ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 32);
+ p_hwfn->p_cxt_mngr->arfs_count =
+ p_params->num_arfs_filters;
break;
}
default:
@@ -1879,47 +2014,6 @@ enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
- struct ecore_tid_mem *p_info)
-{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 proto, seg, total_lines, i, shadow_line;
- struct ecore_ilt_client_cfg *p_cli;
- struct ecore_ilt_cli_blk *p_fl_seg;
- struct ecore_tid_seg *p_seg_info;
-
- /* Verify the personality */
- switch (p_hwfn->hw_info.personality) {
- default:
- return ECORE_INVAL;
- }
-
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
- if (!p_cli->active)
- return ECORE_INVAL;
-
- p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
- if (!p_seg_info->has_fl_mem)
- return ECORE_INVAL;
-
- p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
- total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
- p_fl_seg->real_size_in_page);
-
- for (i = 0; i < total_lines; i++) {
- shadow_line = i + p_fl_seg->start_line -
- p_hwfn->p_cxt_mngr->pf_start_line;
- p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
- }
- p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
- p_fl_seg->real_size_in_page;
- p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
- p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
- p_info->tid_size;
-
- return ECORE_SUCCESS;
-}
-
/* This function is very RoCE oriented, if another protocol in the future
* will want this feature we'll need to modify the function to be more generic
*/
@@ -2157,52 +2251,3 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
return rc;
}
-
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
- u32 tid,
- u8 ctx_type, void **pp_task_ctx)
-{
- struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- struct ecore_ilt_client_cfg *p_cli;
- struct ecore_ilt_cli_blk *p_seg;
- struct ecore_tid_seg *p_seg_info;
- u32 proto, seg;
- u32 total_lines;
- u32 tid_size, ilt_idx;
- u32 num_tids_per_block;
-
- /* Verify the personality */
- switch (p_hwfn->hw_info.personality) {
- default:
- return ECORE_INVAL;
- }
-
- p_cli = &p_mngr->clients[ILT_CLI_CDUT];
- if (!p_cli->active)
- return ECORE_INVAL;
-
- p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
-
- if (ctx_type == ECORE_CTX_WORKING_MEM) {
- p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
- } else if (ctx_type == ECORE_CTX_FL_MEM) {
- if (!p_seg_info->has_fl_mem)
- return ECORE_INVAL;
- p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
- } else {
- return ECORE_INVAL;
- }
- total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
- tid_size = p_mngr->task_type_size[p_seg_info->type];
- num_tids_per_block = p_seg->real_size_in_page / tid_size;
-
- if (total_lines < tid / num_tids_per_block)
- return ECORE_INVAL;
-
- ilt_idx = tid / num_tids_per_block + p_seg->start_line -
- p_mngr->pf_start_line;
- *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
- (tid % num_tids_per_block) * tid_size;
-
- return ECORE_SUCCESS;
-}
diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h
index 5379d7bc..6ff823a5 100644
--- a/drivers/net/qede/base/ecore_cxt.h
+++ b/drivers/net/qede/base/ecore_cxt.h
@@ -35,17 +35,6 @@ u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
enum protocol_type type);
u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn);
-#ifndef LINUX_REMOVE
-/**
- * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration
- *
- * @param p_hwfn
- * @param iids [out], a structure holding all the counters
- */
-void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
- struct ecore_qm_iids *iids);
-#endif
-
/**
* @brief ecore_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -130,14 +119,53 @@ void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn);
enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#define ECORE_CXT_PF_CID (0xff)
+
+/**
+ * @brief ecore_cxt_release - Release a cid
+ *
+ * @param p_hwfn
+ * @param cid
+ */
+void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief ecore_cxt_release - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ */
+void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
+ u32 cid, u8 vfid);
+
+/**
+ * @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid);
+
/**
-* @brief ecore_cxt_release - Release a cid
-*
-* @param p_hwfn
-* @param cid
-*/
-void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn,
- u32 cid);
+ * @brief _ecore_cxt_acquire - Acquire a new cid of a specific protocol type
+ * for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. ECORE_CXT_PF_CID if belongs to PF
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *p_cid, u8 vfid);
/**
* @brief ecore_cxt_get_tid_mem_info - function checks if the
@@ -169,9 +197,5 @@ enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn,
#define ECORE_CTX_WORKING_MEM 0
#define ECORE_CTX_FL_MEM 1
-enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn,
- u32 tid,
- u8 ctx_type,
- void **task_ctx);
#endif /* _ECORE_CID_ */
diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h
index 6a50412a..6d87620d 100644
--- a/drivers/net/qede/base/ecore_cxt_api.h
+++ b/drivers/net/qede/base/ecore_cxt_api.h
@@ -26,19 +26,6 @@ struct ecore_tid_mem {
};
/**
-* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type
-*
-* @param p_hwfn
-* @param type
-* @param p_cid
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
- enum protocol_type type,
- u32 *p_cid);
-
-/**
* @brief ecore_cxt_get_cid_info - Returns the context info for a specific cid
*
*
@@ -50,15 +37,4 @@ enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
struct ecore_cxt_info *p_info);
-/**
-* @brief ecore_cxt_get_tid_mem_info
-*
-* @param p_hwfn
-* @param p_info
-*
-* @return enum _ecore_status_t
-*/
-enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn,
- struct ecore_tid_mem *p_info);
-
#endif
diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c
index 8175619a..4f1b0698 100644
--- a/drivers/net/qede/base/ecore_dcbx.c
+++ b/drivers/net/qede/base/ecore_dcbx.c
@@ -13,6 +13,7 @@
#include "ecore_cxt.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
+#include "ecore_iov_api.h"
#define ECORE_DCBX_MAX_MIB_READ_TRY (100)
#define ECORE_ETH_TYPE_DEFAULT (0)
@@ -27,14 +28,25 @@
static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap)
{
- return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
- DCBX_APP_SF_ETHTYPE) ? true : false;
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool ecore_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return ecore_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
}
static bool ecore_dcbx_app_port(u32 app_info_bitmap)
{
- return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
- DCBX_APP_SF_PORT) ? true : false;
+ return !!(ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
}
static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
@@ -45,100 +57,67 @@ static bool ecore_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
return ecore_dcbx_app_port(app_info_bitmap);
- return (mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT) ?
- true : false;
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
}
-static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return (ecore_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == ECORE_ETH_TYPE_DEFAULT) ? true : false;
-}
+ bool ethtype;
-static bool ecore_dcbx_enabled(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_DISABLED) ? false : true;
-}
+ if (ieee)
+ ethtype = ecore_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = ecore_dcbx_app_ethtype(app_info_bitmap);
-static bool ecore_dcbx_cee(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_CEE) ? true : false;
+ return !!(ethtype && (proto_id == ECORE_ETH_TYPE_DEFAULT));
}
-static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap)
+static bool ecore_dcbx_iwarp_tlv(struct ecore_hwfn *p_hwfn, u32 app_info_bitmap,
+ u16 proto_id, bool ieee)
{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_IEEE) ? true : false;
-}
+ bool port;
-static bool ecore_dcbx_local(u32 dcbx_cfg_bitmap)
-{
- return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) ==
- DCBX_CONFIG_VERSION_STATIC) ? true : false;
-}
+ if (!p_hwfn->p_dcbx_info->iwarp_port)
+ return false;
-/* @@@TBD A0 Eagle workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, bool set_to_pfc)
-{
- if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
- return;
-
- ecore_wr(p_hwfn, p_ptt,
- YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */ +
- YSTORM_FLOW_CONTROL_MODE_OFFSET,
- set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE,
- EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE);
+ if (ieee)
+ port = ecore_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = ecore_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == p_hwfn->p_dcbx_info->iwarp_port));
}
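
The predicates above all extract a sub-field from the MFW app bitmap with the ECORE_MFW_GET_FIELD pattern, i.e. ((val & FOO_MASK) >> FOO_SHIFT). A small sketch of that pattern follows; the mask, shift and enum values below are made up for illustration, the real ones come from the MFW HSI headers.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_APP_SF_MASK   0x00000300u   /* assumed field layout */
#define SKETCH_APP_SF_SHIFT  8
#define SKETCH_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define SKETCH_APP_SF_ETHTYPE 0
#define SKETCH_APP_SF_PORT    1

/* Mirrors the shape of ecore_dcbx_app_ethtype(): true if the selection
 * field of the app entry says "match by ethertype".
 */
static int sketch_app_is_ethtype(uint32_t app_bitmap)
{
	return SKETCH_GET_FIELD(app_bitmap, SKETCH_APP_SF) ==
	       SKETCH_APP_SF_ETHTYPE;
}

int main(void)
{
	uint32_t entry = (uint32_t)SKETCH_APP_SF_PORT << SKETCH_APP_SF_SHIFT;

	printf("entry 0x%08x is ethtype: %d\n", entry,
	       sketch_app_is_ethtype(entry));
	return 0;
}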
static void
ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_results *p_data)
{
- struct ecore_hw_info *p_info = &p_hwfn->hw_info;
enum dcbx_protocol_type id;
- u8 prio, tc, size, update;
- bool enable;
- const char *name; /* @DPDK */
int i;
- size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
-
- DP_INFO(p_hwfn, "DCBX negotiated: %d\n", p_data->dcbx_enabled);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
- name = ecore_dcbx_app_update[i].name;
-
- enable = p_data->arr[id].enable;
- update = p_data->arr[id].update;
- tc = p_data->arr[id].tc;
- prio = p_data->arr[id].priority;
- DP_INFO(p_hwfn,
- "%s info: update %d, enable %d, prio %d, tc %d,"
- " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
- name, update, enable, prio, tc, p_info->num_active_tc,
- p_data->arr[id].dscp_enable, p_data->arr[id].dscp_val);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d,"
+ " num_active_tc %d dscp_enable = %d dscp_val = %d\n",
+ ecore_dcbx_app_update[i].name,
+ p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc,
+ p_data->arr[id].dscp_enable,
+ p_data->arr[id].dscp_val);
}
}
static void
-ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info,
- u8 tc, enum ecore_pci_personality personality)
-{
- /* QM reconf data */
- if (p_info->personality == personality)
- p_info->offload_tc = tc;
-}
-
-void
ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
- bool enable, bool update, u8 prio, u8 tc,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum ecore_pci_personality personality)
{
@@ -164,27 +143,26 @@ ecore_dcbx_set_params(struct ecore_dcbx_results *p_data,
else if (enable)
p_data->arr[type].update = UPDATE_DCB;
else
- p_data->arr[type].update = DONT_UPDATE_DCB_DHCP;
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
- ecore_dcbx_set_pf_tcs(&p_hwfn->hw_info, tc, personality);
+ /* QM reconf data */
+ if (p_hwfn->hw_info.personality == personality)
+ p_hwfn->hw_info.offload_tc = tc;
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
struct ecore_hwfn *p_hwfn,
- bool enable, bool update, u8 prio, u8 tc,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type)
{
enum ecore_pci_personality personality;
enum dcbx_protocol_type id;
const char *name; /* @DPDK */
- u8 size;
int i;
- size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update);
-
- for (i = 0; i < size; i++) {
+ for (i = 0; i < OSAL_ARRAY_SIZE(ecore_dcbx_app_update); i++) {
id = ecore_dcbx_app_update[i].id;
if (type != id)
@@ -193,7 +171,7 @@ ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data,
personality = ecore_dcbx_app_update[i].personality;
name = ecore_dcbx_app_update[i].name;
- ecore_dcbx_set_params(p_data, p_hwfn, enable, update,
+ ecore_dcbx_set_params(p_data, p_hwfn, enable,
prio, tc, type, personality);
}
}
@@ -232,20 +210,18 @@ ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn,
u32 app_prio_bitmap, u16 id,
enum dcbx_protocol_type *type, bool ieee)
{
- bool status = false;
-
- if (ecore_dcbx_default_tlv(app_prio_bitmap, id)) {
+ if (ecore_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
- status = true;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
DP_ERR(p_hwfn,
"No action required, App TLV id = 0x%x"
" app_prio_bitmap = 0x%x\n",
id, app_prio_bitmap);
+ return false;
}
- return status;
+ return true;
}
/* Parse app TLV's to update TC information in hw_info structure for
@@ -257,14 +233,17 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl,
int count, u8 dcbx_version)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u8 tc, priority, priority_map;
enum dcbx_protocol_type type;
+ u8 tc, priority_map;
bool enable, ieee;
u16 protocol_id;
+ u8 priority;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Num APP entries = %d pri_tc_tbl = 0x%x dcbx_version = %u\n",
+ count, pri_tc_tbl, dcbx_version);
ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
@@ -273,10 +252,12 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
DCBX_APP_PROTOCOL_ID);
priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PRI_MAP);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Id = 0x%x pri_map = %u\n",
+ protocol_id, priority_map);
rc = ecore_dcbx_get_app_priority(priority_map, &priority);
if (rc == ECORE_INVAL) {
DP_ERR(p_hwfn, "Invalid priority\n");
- return rc;
+ return ECORE_INVAL;
}
tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority);
@@ -289,9 +270,9 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
* indication, but we only got here if there was an
* app tlv for the protocol, so dcbx must be enabled.
*/
- enable = (type == DCBX_PROTOCOL_ETH ? false : true);
+ enable = !(type == DCBX_PROTOCOL_ETH);
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
}
@@ -308,7 +289,7 @@ ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ ecore_dcbx_update_app_info(p_data, p_hwfn, enable,
priority, tc, type);
}
@@ -350,6 +331,7 @@ ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn)
p_info->num_active_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
data.pf_id = p_hwfn->rel_pf_id;
data.dcbx_enabled = !!dcbx_version;
@@ -371,6 +353,9 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
u32 prefix_seq_num, suffix_seq_num;
int read_count = 0;
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
do {
if (type == ECORE_DCBX_REMOTE_LLDP_MIB) {
ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
@@ -403,21 +388,20 @@ ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn,
return rc;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_app_prio *p_prio,
struct ecore_dcbx_results *p_results)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 val;
if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
- p_results->arr[DCBX_PROTOCOL_ETH].enable) {
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
- DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "Priority: eth %d\n", p_prio->eth);
- }
- return rc;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "Priorities: eth %d\n",
+ p_prio->eth);
}
static void
@@ -508,8 +492,9 @@ ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn,
p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "PFC params: willing %d, pfc_bitmap %d\n",
- p_params->pfc.willing, pfc_map);
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
}
static void
@@ -528,10 +513,10 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags,
DCBX_ETS_MAX_TCS);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
- "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x"
- " max_ets_tc %d\n",
- p_params->ets_willing, p_params->ets_cbs,
- p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
/* 8 bit tsa and bw data corresponding to each of the 8 TC's are
* encoded in a type u32 array of size 2.
@@ -540,7 +525,7 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
bw_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_bw_tbl[1]);
tsa_map[0] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[0]);
tsa_map[1] = OSAL_BE32_TO_CPU(p_ets->tc_tsa_tbl[1]);
- pri_map = OSAL_BE32_TO_CPU(p_ets->pri_tc_tbl[0]);
+ pri_map = p_ets->pri_tc_tbl[0];
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -552,7 +537,7 @@ ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn,
}
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
@@ -563,60 +548,35 @@ ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn,
ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
-
- return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_admin_params *p_local;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
- u32 pfc;
-
- p_local = &params->local;
- p_data = &p_local->params;
- p_app = &p_hwfn->p_dcbx_info->local_admin.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets;
- pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc;
-
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
- false);
- p_local->valid = true;
+ struct dcbx_features *p_feat;
- return ECORE_SUCCESS;
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_remote_params *p_remote;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
- u32 pfc;
-
- p_remote = &params->remote;
- p_data = &p_remote->params;
- p_app = &p_hwfn->p_dcbx_info->remote.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->remote.features.ets;
- pfc = p_hwfn->p_dcbx_info->remote.features.pfc;
+ struct dcbx_features *p_feat;
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params,
false);
- p_remote->valid = true;
-
- return ECORE_SUCCESS;
+ params->remote.valid = true;
}
static enum _ecore_status_t
@@ -625,14 +585,11 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_get *params)
{
struct ecore_dcbx_operational_params *p_operational;
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct dcbx_app_priority_feature *p_app;
- struct dcbx_app_priority_entry *p_tbl;
struct ecore_dcbx_results *p_results;
- struct ecore_dcbx_params *p_data;
- struct dcbx_ets_feature *p_ets;
+ struct dcbx_features *p_feat;
bool enabled, err;
- u32 pfc, flags;
+ u32 flags;
+ bool val;
flags = p_hwfn->p_dcbx_info->operational.flags;
@@ -640,42 +597,50 @@ ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn,
* was successfully performed
*/
p_operational = &params->operational;
- enabled = ecore_dcbx_enabled(flags);
+ enabled = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
if (!enabled) {
p_operational->enabled = enabled;
p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Dcbx is disabled\n");
return ECORE_INVAL;
}
- p_data = &p_operational->params;
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
p_results = &p_hwfn->p_dcbx_info->results;
- p_app = &p_hwfn->p_dcbx_info->operational.features.app;
- p_tbl = p_app->app_pri_tbl;
- p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
- pfc = p_hwfn->p_dcbx_info->operational.features.pfc;
- p_operational->ieee = ecore_dcbx_ieee(flags);
- p_operational->cee = ecore_dcbx_cee(flags);
- p_operational->local = ecore_dcbx_local(flags);
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"Version support: ieee %d, cee %d, static %d\n",
p_operational->ieee, p_operational->cee,
p_operational->local);
- ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data,
+ ecore_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
p_operational->ieee);
ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio,
p_results);
- err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ err = ECORE_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
p_operational->enabled = enabled;
p_operational->valid = true;
- return rc;
+ return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
@@ -700,62 +665,46 @@ ecore_dcbx_get_dscp_params(struct ecore_hwfn *p_hwfn,
p_dscp->dscp_pri_map[entry] = (u32)(pri_map >>
(j * 4)) & 0xf;
}
-
- return ECORE_SUCCESS;
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_lldp_local *p_local;
- osal_size_t size;
- u32 *dest;
-
- p_local = &params->lldp_local;
-
- size = OSAL_ARRAY_SIZE(p_local->local_chassis_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_local.local_chassis_id;
- OSAL_MEMCPY(dest, p_local->local_chassis_id, size);
+ struct lldp_config_params_s *p_local;
- size = OSAL_ARRAY_SIZE(p_local->local_port_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_local.local_port_id;
- OSAL_MEMCPY(dest, p_local->local_port_id, size);
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
- return ECORE_SUCCESS;
+ OSAL_MEMCPY(params->lldp_local.local_chassis_id,
+ p_local->local_chassis_id,
+ OSAL_ARRAY_SIZE(p_local->local_chassis_id));
+ OSAL_MEMCPY(params->lldp_local.local_port_id, p_local->local_port_id,
+ OSAL_ARRAY_SIZE(p_local->local_port_id));
}
-static enum _ecore_status_t
+static void
ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_dcbx_get *params)
{
- struct ecore_dcbx_lldp_remote *p_remote;
- osal_size_t size;
- u32 *dest;
-
- p_remote = &params->lldp_remote;
+ struct lldp_status_params_s *p_remote;
- size = OSAL_ARRAY_SIZE(p_remote->peer_chassis_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_chassis_id;
- OSAL_MEMCPY(dest, p_remote->peer_chassis_id, size);
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
- size = OSAL_ARRAY_SIZE(p_remote->peer_port_id);
- dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_port_id;
- OSAL_MEMCPY(dest, p_remote->peer_port_id, size);
-
- return ECORE_SUCCESS;
+ OSAL_MEMCPY(params->lldp_remote.peer_chassis_id,
+ p_remote->peer_chassis_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_chassis_id));
+ OSAL_MEMCPY(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ OSAL_ARRAY_SIZE(p_remote->peer_port_id));
}
static enum _ecore_status_t
-ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, enum ecore_mib_read_type type)
+ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_dcbx_get *p_params,
+ enum ecore_mib_read_type type)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_get *p_params;
-
- p_params = &p_hwfn->p_dcbx_info->get;
switch (type) {
case ECORE_DCBX_REMOTE_MIB:
@@ -768,10 +717,10 @@ ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn,
ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
break;
case ECORE_DCBX_REMOTE_LLDP_MIB:
- rc = ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
break;
case ECORE_DCBX_LOCAL_LLDP_MIB:
- rc = ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
@@ -785,9 +734,10 @@ static enum _ecore_status_t
ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
lldp_config_params);
data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
@@ -802,8 +752,8 @@ ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_dcbx_mib_meta_data data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
@@ -857,6 +807,7 @@ ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
struct ecore_dcbx_mib_meta_data data;
enum _ecore_status_t rc = ECORE_SUCCESS;
+ OSAL_MEM_ZERO(&data, sizeof(data));
data.addr = p_hwfn->mcp_info->port_addr +
offsetof(struct public_port, local_admin_dcbx_mib);
data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
@@ -883,7 +834,7 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
enum ecore_mib_read_type type)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_INVAL;
switch (type) {
case ECORE_DCBX_OPERATIONAL_MIB:
@@ -904,7 +855,6 @@ static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn,
break;
default:
DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
- return ECORE_INVAL;
}
return rc;
@@ -945,10 +895,9 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* according to negotiation results
*/
enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled;
- ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled);
}
}
- ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ ecore_dcbx_get_params(p_hwfn, p_ptt, &p_hwfn->p_dcbx_info->get, type);
/* Update the DSCP to TC mapping bit if required */
if ((type == ECORE_DCBX_OPERATIONAL_MIB) &&
@@ -964,17 +913,18 @@ ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_dcbx_info));
+ sizeof(*p_hwfn->p_dcbx_info));
if (!p_hwfn->p_dcbx_info) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_dcbx_info'");
- rc = ECORE_NOMEM;
+ return ECORE_NOMEM;
}
- return rc;
+ p_hwfn->p_dcbx_info->iwarp_port =
+ p_hwfn->pf_params.rdma_pf_params.iwarp_port;
+
+ return ECORE_SUCCESS;
}
void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn,
@@ -999,24 +949,31 @@ void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest)
{
struct protocol_dcb_data *p_dcb_data;
- bool update_flag = false;
+ u8 update_flag;
p_dest->pf_id = p_src->pf_id;
update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
- p_dest->update_eth_dcb_data_flag = update_flag;
+ p_dest->update_eth_dcb_data_mode = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_IWARP].update;
+ p_dest->update_iwarp_dcb_data_mode = update_flag;
p_dcb_data = &p_dest->eth_dcb_data;
ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+ p_dcb_data = &p_dest->iwarp_dcb_data;
+ ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_IWARP);
}
-static
-enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
- enum ecore_mib_read_type type)
+enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_dcbx_get *p_get,
+ enum ecore_mib_read_type type)
{
struct ecore_ptt *p_ptt;
enum _ecore_status_t rc;
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
p_ptt = ecore_ptt_acquire(p_hwfn);
if (!p_ptt) {
rc = ECORE_TIMEOUT;
@@ -1028,30 +985,13 @@ enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
goto out;
- rc = ecore_dcbx_get_params(p_hwfn, p_ptt, type);
+ rc = ecore_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
out:
ecore_ptt_release(p_hwfn, p_ptt);
return rc;
}
-enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn,
- struct ecore_dcbx_get *p_get,
- enum ecore_mib_read_type type)
-{
- enum _ecore_status_t rc;
-
- rc = ecore_dcbx_query(p_hwfn, type);
- if (rc)
- return rc;
-
- if (p_get != OSAL_NULL)
- OSAL_MEMCPY(p_get, &p_hwfn->p_dcbx_info->get,
- sizeof(struct ecore_dcbx_get));
-
- return rc;
-}
-
static void
ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
u32 *pfc, struct ecore_dcbx_params *p_params)
@@ -1074,8 +1014,8 @@ ecore_dcbx_set_pfc_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
if (p_params->pfc.prio[i])
- pfc_map |= (0x1 << i);
-
+ pfc_map |= (1 << i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -1087,6 +1027,7 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_params *p_params)
{
u8 *bw_map, *tsa_map;
+ u32 val;
int i;
if (p_params->ets_willing)
@@ -1113,14 +1054,22 @@ ecore_dcbx_set_ets_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++) {
bw_map[i] = p_params->ets_tc_bw_tbl[i];
tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
- p_ets->pri_tc_tbl[0] |= (((u32)p_params->ets_pri_tc_tbl[i]) <<
- ((7 - i) * 4));
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
}
- p_ets->pri_tc_tbl[0] = OSAL_CPU_TO_BE32(p_ets->pri_tc_tbl[0]);
for (i = 0; i < 2; i++) {
p_ets->tc_bw_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_bw_tbl[i]);
p_ets->tc_tsa_tbl[i] = OSAL_CPU_TO_BE32(p_ets->tc_tsa_tbl[i]);
}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
+ "flags = 0x%x pri_tc = 0x%x tc_bwl[] = {0x%x, 0x%x} tc_tsa = {0x%x, 0x%x}\n",
+ p_ets->flags, p_ets->pri_tc_tbl[0], p_ets->tc_bw_tbl[0],
+ p_ets->tc_bw_tbl[1], p_ets->tc_tsa_tbl[0],
+ p_ets->tc_tsa_tbl[1]);
}
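
A short sketch of the priority-to-TC nibble packing done in the loop above: each of the 8 priorities gets 4 bits of a single u32, with priority 0 in the most significant nibble (shift (7 - i) * 4). Names here are illustrative, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* Pack one TC value (0..15) per priority into a single pri_tc table word. */
static uint32_t sketch_pack_pri_tc(const uint8_t pri_tc[8])
{
	uint32_t tbl = 0;
	int i;

	for (i = 0; i < 8; i++)
		tbl |= (uint32_t)(pri_tc[i] & 0xf) << ((7 - i) * 4);
	return tbl;
}

int main(void)
{
	const uint8_t pri_tc[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };

	/* prints pri_tc_tbl[0] = 0x00112233 */
	printf("pri_tc_tbl[0] = 0x%08x\n", sketch_pack_pri_tc(pri_tc));
	return 0;
}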
static void
@@ -1147,24 +1096,33 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
if (ieee) {
- *entry &= ~DCBX_APP_SF_IEEE_MASK;
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
switch (p_params->app_entry[i].sf_ieee) {
case ECORE_DCBX_SF_IEEE_ETHTYPE:
*entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_TCP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_UDP_PORT:
*entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
case ECORE_DCBX_SF_IEEE_TCP_UDP_PORT:
*entry |= (u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
DCBX_APP_SF_IEEE_SHIFT;
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
break;
}
} else {
@@ -1183,6 +1141,8 @@ ecore_dcbx_set_app_data(struct ecore_hwfn *p_hwfn,
*entry |= ((u32)(p_params->app_entry[i].prio) <<
DCBX_APP_PRI_MAP_SHIFT);
}
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_app->flags);
}
static enum _ecore_status_t
@@ -1195,7 +1155,7 @@ ecore_dcbx_set_local_params(struct ecore_hwfn *p_hwfn,
local_admin->flags = 0;
OSAL_MEMCPY(&local_admin->features,
&p_hwfn->p_dcbx_info->operational.features,
- sizeof(struct dcbx_features));
+ sizeof(local_admin->features));
if (params->enabled) {
local_admin->config = params->ver_num;
@@ -1230,10 +1190,9 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
OSAL_MEMCPY(p_dscp_map, &p_hwfn->p_dcbx_info->dscp_map,
sizeof(*p_dscp_map));
+ p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
if (p_params->dscp.enabled)
p_dscp_map->flags |= DCB_DSCP_ENABLE_MASK;
- else
- p_dscp_map->flags &= ~DCB_DSCP_ENABLE_MASK;
for (i = 0, entry = 0; i < 8; i++) {
val = 0;
@@ -1246,6 +1205,8 @@ ecore_dcbx_set_dscp_params(struct ecore_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->dscp_nig_update = true;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "flags = 0x%x\n", p_dscp_map->flags);
+
return ECORE_SUCCESS;
}
@@ -1254,15 +1215,15 @@ enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *p_hwfn,
struct ecore_dcbx_set *params,
bool hw_commit)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- struct ecore_dcbx_mib_meta_data data;
struct dcbx_local_params local_admin;
+ struct ecore_dcbx_mib_meta_data data;
struct dcb_dscp_map dscp_map;
u32 resp = 0, param = 0;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!hw_commit) {
OSAL_MEMCPY(&p_hwfn->p_dcbx_info->set, params,
- sizeof(struct ecore_dcbx_set));
+ sizeof(p_hwfn->p_dcbx_info->set));
return ECORE_SUCCESS;
}
@@ -1315,12 +1276,13 @@ enum _ecore_status_t ecore_dcbx_get_config_params(struct ecore_hwfn *p_hwfn,
}
dcbx_info = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
- sizeof(struct ecore_dcbx_get));
+ sizeof(*dcbx_info));
if (!dcbx_info) {
DP_ERR(p_hwfn, "Failed to allocate struct ecore_dcbx_info\n");
return ECORE_NOMEM;
}
+ OSAL_MEMSET(dcbx_info, 0, sizeof(*dcbx_info));
rc = ecore_dcbx_query_params(p_hwfn, dcbx_info,
ECORE_DCBX_OPERATIONAL_MIB);
if (rc) {
diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h
index 15186246..eba2d91b 100644
--- a/drivers/net/qede/base/ecore_dcbx.h
+++ b/drivers/net/qede/base/ecore_dcbx.h
@@ -17,9 +17,6 @@
#include "ecore_hsi_common.h"
#include "ecore_dcbx_api.h"
-#define ECORE_MFW_GET_FIELD(name, field) \
- (((name) & (field ## _MASK)) >> (field ## _SHIFT))
-
struct ecore_dcbx_info {
struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
@@ -32,6 +29,7 @@ struct ecore_dcbx_info {
struct ecore_dcbx_set set;
struct ecore_dcbx_get get;
u8 dcbx_cap;
+ u16 iwarp_port;
};
struct ecore_dcbx_mib_meta_data {
@@ -56,10 +54,4 @@ void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *);
void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src,
struct pf_update_ramrod_data *p_dest);
-#ifndef REAL_ASIC_ONLY
-/* @@@TBD eagle phy workaround */
-void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *,
- bool set_to_pfc);
-#endif
-
#endif /* __ECORE_DCBX_H__ */
diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h
index 82416e7f..2dc76796 100644
--- a/drivers/net/qede/base/ecore_dcbx_api.h
+++ b/drivers/net/qede/base/ecore_dcbx_api.h
@@ -37,6 +37,7 @@ enum dcbx_protocol_type {
DCBX_PROTOCOL_ROCE,
DCBX_PROTOCOL_ROCE_V2,
DCBX_PROTOCOL_ETH,
+ DCBX_PROTOCOL_IWARP,
DCBX_MAX_PROTOCOL_TYPE
};
@@ -147,6 +148,7 @@ struct ecore_dcbx_get {
#define ECORE_DCBX_VERSION_DISABLED 0
#define ECORE_DCBX_VERSION_IEEE 1
#define ECORE_DCBX_VERSION_CEE 2
+#define ECORE_DCBX_VERSION_DYNAMIC 3
struct ecore_dcbx_set {
#define ECORE_DCBX_OVERRIDE_STATE (1 << 0)
@@ -190,7 +192,8 @@ static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = {
{DCBX_PROTOCOL_FCOE, "FCOE", ECORE_PCI_FCOE},
{DCBX_PROTOCOL_ROCE, "ROCE", ECORE_PCI_ETH_ROCE},
{DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", ECORE_PCI_ETH_ROCE},
- {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH}
+ {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH},
+ {DCBX_PROTOCOL_IWARP, "IWARP", ECORE_PCI_ETH_IWARP}
};
#endif /* __ECORE_DCBX_API_H__ */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 6060f9ee..865103c6 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -30,6 +30,7 @@
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dcbx.h"
+#include "ecore_l2.h"
/* TODO - there's a bug in DCBx re-configuration flows in MF, as the QM
* registers involved are not split and thus configuration is a race where
@@ -70,28 +71,26 @@ static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
}
val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ if (val)
+ return 1 << (val + 15);
/* The above registers were updated in the past only in CMT mode. Since
* they were found to be useful MFW started updating them from 8.7.7.0.
* In older MFW versions they are set to 0 which means disabled.
*/
- if (!val) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size");
- DP_NOTICE(p_hwfn, false,
- "of 256kB for GRC and 512kB for DB\n");
- return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
- } else {
- DP_NOTICE(p_hwfn, false,
- "BAR size not configured. Assuming BAR size");
- DP_NOTICE(p_hwfn, false,
- "of 512kB for GRC and 512kB for DB\n");
- return 512 * 1024;
- }
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 256kB"
+ " for GRC and 512kB for DB\n");
+ val = BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "BAR size not configured. Assuming BAR size of 512kB"
+ " for GRC and 512kB for DB\n");
+ val = 512 * 1024;
}
- return 1 << (val + 15);
+ return val;
}
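
A hedged sketch of the BAR size decode used above: a non-zero register value val is taken as a power of two, 1 << (val + 15) bytes (so 1 maps to 64kB and 4 to 512kB), while zero means the MFW did not configure the size and a default is assumed; the fallback below mirrors only the single-hwfn case.

#include <stdio.h>

static unsigned int sketch_bar_size_bytes(unsigned int val)
{
	if (val)
		return 1u << (val + 15);
	return 512 * 1024;	/* assumed default when unconfigured */
}

int main(void)
{
	unsigned int v;

	for (v = 0; v <= 4; v++)
		printf("val=%u -> %u kB\n", v, sketch_bar_size_bytes(v) / 1024);
	return 0;
}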
void ecore_init_dp(struct ecore_dev *p_dev,
@@ -138,335 +137,620 @@ static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
- qm_info->qm_pq_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
- qm_info->qm_vport_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
- qm_info->qm_port_params = OSAL_NULL;
OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
- qm_info->wfq_data = OSAL_NULL;
}
void ecore_resc_free(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_free(&p_dev->hwfns[i]);
return;
+ }
OSAL_FREE(p_dev, p_dev->fw_data);
- p_dev->fw_data = OSAL_NULL;
OSAL_FREE(p_dev, p_dev->reset_stats);
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
- OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
- p_hwfn->p_tx_cids = OSAL_NULL;
- OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
- p_hwfn->p_rx_cids = OSAL_NULL;
- }
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
ecore_cxt_mngr_free(p_hwfn);
ecore_qm_info_free(p_hwfn);
ecore_spq_free(p_hwfn);
- ecore_eq_free(p_hwfn, p_hwfn->p_eq);
- ecore_consq_free(p_hwfn, p_hwfn->p_consq);
+ ecore_eq_free(p_hwfn);
+ ecore_consq_free(p_hwfn);
ecore_int_free(p_hwfn);
-#ifdef CONFIG_ECORE_LL2
- ecore_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
-#endif
ecore_iov_free(p_hwfn);
+ ecore_l2_free(p_hwfn);
ecore_dmae_info_free(p_hwfn);
ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
/* @@@TBD Flush work-queue ? */
}
}
-static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
- bool b_sleepable)
+/******************** QM initialization *******************/
+
+/* bitmaps for indicating active traffic classes.
+ * Special case for Arrowhead 4 port
+ */
+/* 0..3 actually used, 4 serves OOO, 7 serves high priority stuff (e.g. DCQCN) */
+#define ACTIVE_TCS_BMAP 0x9f
+/* 0..3 actually used, OOO and high priority stuff all use 3 */
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 ecore_get_pq_flags(struct ecore_hwfn *p_hwfn)
{
- u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue;
- struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- struct init_qm_port_params *p_qm_port;
- bool init_rdma_offload_pq = false;
- bool init_pure_ack_pq = false;
- bool init_ooo_pq = false;
- u16 num_pqs, protocol_pqs;
- u16 num_pf_rls = 0;
- u16 num_vfs = 0;
- u32 pf_rl;
- u8 pf_wfq;
-
- /* @TMP - saving the existing min/max bw config before resetting the
- * qm_info to restore them.
- */
- pf_rl = qm_info->pf_rl;
- pf_wfq = qm_info->pf_wfq;
+ u32 flags;
-#ifdef CONFIG_ECORE_SRIOV
- if (p_hwfn->p_dev->p_iov_info)
- num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
-#endif
- OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
+ /* common flags */
+ flags = PQ_FLAGS_LB;
-#ifndef ASIC_ONLY
- /* @TMP - Don't allocate QM queues for VFs on emulation */
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, false,
- "Emulation - skip configuring QM queues for VFs\n");
- num_vfs = 0;
+ /* feature flags */
+ if (IS_ECORE_SRIOV(p_hwfn->p_dev))
+ flags |= PQ_FLAGS_VFS;
+
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case ECORE_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case ECORE_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ISCSI:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD;
+ break;
+ case ECORE_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+ PQ_FLAGS_OFLD;
+ break;
+ default:
+ DP_ERR(p_hwfn, "unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ return 0;
}
-#endif
+ return flags;
+}
- /* ethernet PFs require a pq per tc. Even if only a subset of the TCs
- * active, we want physical queues allocated for all of them, since we
- * don't have a good recycle flow. Non ethernet PFs require only a
- * single physical queue.
- */
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
- p_hwfn->hw_info.personality == ECORE_PCI_ETH)
- protocol_pqs = p_hwfn->hw_info.num_hw_tc;
- else
- protocol_pqs = 1;
-
- num_pqs = protocol_pqs + num_vfs + 1; /* The '1' is for pure-LB */
- num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
- num_pqs++; /* for RoCE queue */
- init_rdma_offload_pq = true;
- if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn) {
- /* Due to FW assumption that rl==vport, we limit the
- * number of rate limiters by the minimum between its
- * allocated number and the allocated number of vports.
- * Another limitation is the number of supported qps
- * with rate limiters in FW.
- */
- num_pf_rls =
- (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
- RESC_NUM(p_hwfn, ECORE_VPORT));
+/* Getters for resource amounts necessary for qm initialization */
+u8 ecore_init_qm_get_num_tcs(struct ecore_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
- /* we subtract num_vfs because each one requires a rate
- * limiter, and one default rate limiter.
- */
- if (num_pf_rls < num_vfs + 1) {
- DP_ERR(p_hwfn, "No RL for DCQCN");
- DP_ERR(p_hwfn, "[num_pf_rls %d num_vfs %d]\n",
- num_pf_rls, num_vfs);
- return ECORE_INVAL;
- }
- num_pf_rls -= num_vfs + 1;
- }
+u16 ecore_init_qm_get_num_vfs(struct ecore_hwfn *p_hwfn)
+{
+ return IS_ECORE_SRIOV(p_hwfn->p_dev) ?
+ p_hwfn->p_dev->p_iov_info->total_vfs : 0;
+}
- num_pqs += num_pf_rls;
- qm_info->num_pf_rls = (u8)num_pf_rls;
- }
+#define NUM_DEFAULT_RLS 1
- if (p_hwfn->hw_info.personality == ECORE_PCI_IWARP) {
- num_pqs += 3; /* for iwarp queue / pure-ack / ooo */
- init_rdma_offload_pq = true;
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+u16 ecore_init_qm_get_num_pf_rls(struct ecore_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
- if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
- num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
- init_pure_ack_pq = true;
- init_ooo_pq = true;
- }
+ /* @DPDK */
+ /* num RLs can't exceed resource amount of rls or vports or the
+ * dcqcn qps
+ */
+ num_pf_rls = (u16)OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_RL),
+ (u16)RESC_NUM(p_hwfn, ECORE_VPORT));
- /* Sanity checking that setup requires legal number of resources */
- if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
- DP_ERR(p_hwfn,
- "Need too many Physical queues - 0x%04x avail %04x",
- num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
- return ECORE_INVAL;
+ /* make sure after we reserve the default and VF rls we'll have
+ * something left
+ */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) {
+ DP_NOTICE(p_hwfn, false,
+ "no rate limiters left for PF rate limiting"
+ " [num_pf_rls %d num_vfs %d]\n", num_pf_rls, num_vfs);
+ return 0;
}
- /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
- * then special queues (iSCSI pure-ACK / RoCE), then per-VF PQ.
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+ return num_pf_rls;
+}
+
+u16 ecore_init_qm_get_num_vports(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ /* all pqs share the same vport (hence the 1 below), except for vfs
+ * and pf_rl pqs
*/
- qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct init_qm_pq_params) *
- num_pqs);
- if (!qm_info->qm_pq_params)
- goto alloc_err;
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn) + 1;
+}
- qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct
- init_qm_vport_params) *
- num_vports);
- if (!qm_info->qm_vport_params)
- goto alloc_err;
+/* calc amount of PQs according to the requested flags */
+u16 ecore_init_qm_get_num_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u32 pq_flags = ecore_get_pq_flags(p_hwfn);
+
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ ecore_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ ecore_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) +
+ (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ ecore_init_qm_get_num_vfs(p_hwfn);
+}
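The getters above replace the old ad-hoc counting with closed formulas: the PF rate limiters are the min of the RL and vport resources minus one default RL and one RL per VF, all non-VF/non-RL PQs share a single vport, and the PQ total is the sum of the per-flag contributions. The standalone sketch below models that arithmetic in plain C; the PQ_FLAGS_* bit values, the resource amounts and the example configuration are assumptions for illustration, not the values from the ecore headers.

#include <stdint.h>
#include <stdio.h>

/* illustrative flag bits and resource amounts -- not the ecore values */
#define PQ_FLAGS_RLS  (1 << 0)
#define PQ_FLAGS_MCOS (1 << 1)
#define PQ_FLAGS_LB   (1 << 2)
#define PQ_FLAGS_OOO  (1 << 3)
#define PQ_FLAGS_ACK  (1 << 4)
#define PQ_FLAGS_OFLD (1 << 5)
#define PQ_FLAGS_VFS  (1 << 6)
#define NUM_DEFAULT_RLS 1

struct qm_model {
	uint32_t pq_flags;   /* what ecore_get_pq_flags() would return */
	uint16_t num_tcs;    /* hw_info.num_hw_tc */
	uint16_t num_vfs;    /* total_vfs when SR-IOV is active, else 0 */
	uint16_t resc_rl;    /* RESC_NUM(p_hwfn, ECORE_RL) */
	uint16_t resc_vport; /* RESC_NUM(p_hwfn, ECORE_VPORT) */
};

static uint16_t model_num_pf_rls(const struct qm_model *m)
{
	uint16_t rls = m->resc_rl < m->resc_vport ? m->resc_rl : m->resc_vport;

	/* the default RL and one RL per VF are reserved first */
	if (rls < m->num_vfs + NUM_DEFAULT_RLS)
		return 0;
	return rls - (m->num_vfs + NUM_DEFAULT_RLS);
}

static uint16_t model_num_vports(const struct qm_model *m)
{
	/* one shared vport, plus a private vport per PF-RL PQ and per VF PQ */
	return 1 +
	       !!(m->pq_flags & PQ_FLAGS_RLS) * model_num_pf_rls(m) +
	       !!(m->pq_flags & PQ_FLAGS_VFS) * m->num_vfs;
}

static uint16_t model_num_pqs(const struct qm_model *m)
{
	return !!(m->pq_flags & PQ_FLAGS_RLS) * model_num_pf_rls(m) +
	       !!(m->pq_flags & PQ_FLAGS_MCOS) * m->num_tcs +
	       !!(m->pq_flags & PQ_FLAGS_LB) +
	       !!(m->pq_flags & PQ_FLAGS_OOO) +
	       !!(m->pq_flags & PQ_FLAGS_ACK) +
	       !!(m->pq_flags & PQ_FLAGS_OFLD) +
	       !!(m->pq_flags & PQ_FLAGS_VFS) * m->num_vfs;
}

int main(void)
{
	/* hypothetical L2 PF: 4 TCs, 8 VFs, no RL/OOO/ACK/OFLD PQs */
	struct qm_model m = {
		.pq_flags = PQ_FLAGS_MCOS | PQ_FLAGS_LB | PQ_FLAGS_VFS,
		.num_tcs = 4, .num_vfs = 8, .resc_rl = 16, .resc_vport = 16,
	};

	/* prints pqs=13 (4 TC + 1 LB + 8 VF) and vports=9 (1 shared + 8 VF) */
	printf("pqs=%u vports=%u\n", (unsigned)model_num_pqs(&m),
	       (unsigned)model_num_vports(&m));
	return 0;
}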
- qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct init_qm_port_params)
- * MAX_NUM_PORTS);
- if (!qm_info->qm_port_params)
- goto alloc_err;
+/* initialize the top level QM params */
+static void ecore_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
- qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
- b_sleepable ? GFP_KERNEL :
- GFP_ATOMIC,
- sizeof(struct ecore_wfq_data) *
- num_vports);
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
- if (!qm_info->wfq_data)
- goto alloc_err;
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->p_dev->num_ports_in_engines == MAX_NUM_PORTS_K2;
- vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
- /* First init rate limited queues ( Due to RoCE assumption of
- * qpid=rlid )
+ /* unless MFW indicated otherwise, ooo_tc should be 3 for AH 4 port and
+ * 4 otherwise
*/
- for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- };
-
- /* Protocol PQs */
- for (i = 0; i < protocol_pqs; i++) {
- struct init_qm_pq_params *params =
- &qm_info->qm_pq_params[curr_queue++];
-
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE ||
- p_hwfn->hw_info.personality == ECORE_PCI_IWARP ||
- p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
- params->vport_id = vport_id;
- params->tc_id = i;
- /* Note: this assumes that if we had a configuration
- * with N tcs and subsequently another configuration
- * With Fewer TCs, the in flight traffic (in QM queues,
- * in FW, from driver to FW) will still trickle out and
- * not get "stuck" in the QM. This is determined by the
- * NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ. Unused TCs are
- * supposed to be cleared in this map, allowing traffic
- * to flush out. If this is not the case, we would need
- * to set the TC of unused queues to 0, and reconfigure
- * QM every time num of TCs changes. Unused queues in
- * this context would mean those intended for TCs where
- * tc_id > hw_info.num_active_tcs.
- */
- params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
- } else {
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.offload_tc;
- params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */
- }
- }
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
- /* Then init pure-LB PQ */
- qm_info->pure_lb_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id =
- (u8)RESC_START(p_hwfn, ECORE_VPORT);
- qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
-
- qm_info->offload_pq = 0; /* Already initialized for iSCSI/FCoE */
- if (init_rdma_offload_pq) {
- qm_info->offload_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_pure_ack_pq) {
- qm_info->pure_ack_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id =
- p_hwfn->hw_info.offload_tc;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- if (init_ooo_pq) {
- qm_info->ooo_pq = curr_queue;
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
- qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- curr_queue++;
- }
-
- /* Then init per-VF PQs */
- vf_offset = curr_queue;
- for (i = 0; i < num_vfs; i++) {
- /* First vport is used by the PF */
- qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
- /* @@@TBD VF Multi-cos */
- qm_info->qm_pq_params[curr_queue].tc_id = 0;
- qm_info->qm_pq_params[curr_queue].wrr_group = 1;
- qm_info->qm_pq_params[curr_queue].rl_valid = 1;
- curr_queue++;
- };
-
- qm_info->vf_queues_offset = vf_offset;
- qm_info->num_pqs = num_pqs;
- qm_info->num_vports = num_vports;
+/* initialize qm vport params */
+static void ecore_init_qm_vport_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < ecore_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
+}
+
+/* initialize qm port params */
+static void ecore_init_qm_port_params(struct ecore_hwfn *p_hwfn)
+{
/* Initialize qm port parameters */
- num_ports = p_hwfn->p_dev->num_ports_in_engines;
+ u8 i, active_phys_tcs, num_ports = p_hwfn->p_dev->num_ports_in_engines;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 : ACTIVE_TCS_BMAP;
+
for (i = 0; i < num_ports; i++) {
- p_qm_port = &qm_info->qm_port_params[i];
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+
p_qm_port->active = 1;
- /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
- * be in place
- */
- if (num_ports == 4)
- p_qm_port->active_phys_tcs = 0xf;
- else
- p_qm_port->active_phys_tcs = 0x9f;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
+}
- if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
- qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
- else
- qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void ecore_init_qm_reset_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void ecore_init_qm_advance_vport(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resources accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or
+ * whether the vport is shared.
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT (1 << 0)
+#define PQ_INIT_PF_RL (1 << 1)
+#define PQ_INIT_VF_RL (1 << 2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+#define PQ_INIT_OFLD_TC (p_hwfn->hw_info.offload_tc)
+
+static void ecore_init_qm_pq(struct ecore_hwfn *p_hwfn,
+ struct ecore_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq =
+ ecore_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL ||
+ pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > ecore_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d,"
+ " qm_init_get_num_vports() %d\n",
+ qm_info->num_vports,
+ ecore_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > ecore_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn, "rl overflow! qm_info->num_pf_rls %d,"
+ " qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls,
+ ecore_init_qm_get_num_pf_rls(p_hwfn));
+}
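ecore_init_qm_pq() above does three pieces of bookkeeping per PQ: it always consumes one PQ slot, it consumes a new vport unless PQ_INIT_SHARE_VPORT is set, and it consumes a PF rate limiter only for PQ_INIT_PF_RL. A toy model of that accounting, using simplified stand-in names rather than the ecore structures:

#include <stdint.h>

/* toy per-PQ accounting; the names are simplified stand-ins */
#define INIT_SHARE_VPORT (1 << 0)
#define INIT_PF_RL       (1 << 1)
#define INIT_VF_RL       (1 << 2)

struct toy_qm {
	uint16_t num_pqs;
	uint16_t num_vports;
	uint16_t num_pf_rls;
};

static void toy_add_pq(struct toy_qm *qm, uint32_t flags)
{
	qm->num_pqs++;                    /* every PQ takes one PQ slot */
	if (!(flags & INIT_SHARE_VPORT))
		qm->num_vports++;         /* RL and VF PQs get a private vport */
	if (flags & INIT_PF_RL)
		qm->num_pf_rls++;         /* only PF-rate-limited PQs use a PF RL */
}

int main(void)
{
	struct toy_qm qm = {0};

	toy_add_pq(&qm, INIT_SHARE_VPORT); /* e.g. the pure-LB PQ */
	toy_add_pq(&qm, INIT_VF_RL);       /* e.g. a VF PQ: new vport, VF RL */
	toy_add_pq(&qm, INIT_PF_RL);       /* e.g. a DCQCN PQ: new vport + PF RL */

	/* qm ends up with num_pqs=3, num_vports=2, num_pf_rls=1 */
	return qm.num_pqs == 3 ? 0 : 1;
}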
+
+/* get pq index according to PQ_FLAGS */
+static u16 *ecore_init_qm_get_idx_from_flags(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (OSAL_BITMAP_WEIGHT((unsigned long *)&pq_flags,
+ sizeof(pq_flags)) > 1)
+ goto err;
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->offload_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
+ return OSAL_NULL;
+}
+
+/* save pq index in qm info */
+static void ecore_init_qm_set_idx(struct ecore_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 ecore_get_cm_pq_idx(struct ecore_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = ecore_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 ecore_get_cm_pq_idx_mcos(struct ecore_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = ecore_init_qm_get_num_tcs(p_hwfn);
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+}
+
+u16 ecore_get_cm_pq_idx_vf(struct ecore_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+}
+
+u16 ecore_get_cm_pq_idx_rl(struct ecore_hwfn *p_hwfn, u8 rl)
+{
+ u16 max_rl = ecore_init_qm_get_num_pf_rls(p_hwfn);
+
+ if (rl > max_rl)
+ DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
+
+ return ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
+}
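ecore_init_qm_get_idx_from_flags() resolves exactly one PQ_FLAGS_* bit to the first PQ of that group, and the ecore_get_cm_pq_idx_*() helpers then add the tc/vf/rl offset. The sketch below shows the same single-bit dispatch on plain data; the group bits and the example layout are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* illustrative single-bit group identifiers (not the ecore flag values) */
enum pq_group {
	GRP_MCOS = 1 << 0,
	GRP_LB   = 1 << 1,
	GRP_VFS  = 1 << 2,
};

struct pq_layout {
	uint16_t first_mcos_pq;
	uint16_t pure_lb_pq;
	uint16_t first_vf_pq;
};

/* return the base index of the requested group, or UINT16_MAX when the
 * argument is not exactly one known bit (mirrors the error path above) */
static uint16_t pq_base(const struct pq_layout *l, uint32_t group)
{
	switch (group) {
	case GRP_MCOS: return l->first_mcos_pq;
	case GRP_LB:   return l->pure_lb_pq;
	case GRP_VFS:  return l->first_vf_pq;
	default:       return UINT16_MAX;
	}
}

int main(void)
{
	/* hypothetical layout: 4 per-TC PQs, then the LB PQ, then VF PQs */
	struct pq_layout l = { .first_mcos_pq = 0, .pure_lb_pq = 4,
			       .first_vf_pq = 5 };

	/* tx PQ for traffic class 2 is 2, for VF 3 it is 8 */
	printf("tc2 pq %u, vf3 pq %u\n",
	       (unsigned)(pq_base(&l, GRP_MCOS) + 2),
	       (unsigned)(pq_base(&l, GRP_VFS) + 3));
	return 0;
}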
+
+/* Functions for creating specific types of pqs */
+static void ecore_init_qm_lb_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_ooo_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_pure_ack_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_offload_pq(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_mcos_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < ecore_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void ecore_init_qm_vf_pqs(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = ecore_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
qm_info->num_vf_pqs = num_vfs;
- qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_DEFAULT_TC,
+ PQ_INIT_VF_RL);
+}
- for (i = 0; i < qm_info->num_vports; i++)
- qm_info->qm_vport_params[i].vport_wfq = 1;
+static void ecore_init_qm_rl_pqs(struct ecore_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = ecore_init_qm_get_num_pf_rls(p_hwfn);
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
- qm_info->vport_rl_en = 1;
- qm_info->vport_wfq_en = 1;
- qm_info->pf_rl = pf_rl;
- qm_info->pf_wfq = pf_wfq;
+ if (!(ecore_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ ecore_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ ecore_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC,
+ PQ_INIT_PF_RL);
+}
+
+static void ecore_init_qm_pq_params(struct ecore_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ ecore_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ ecore_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ ecore_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ ecore_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ ecore_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ ecore_init_qm_offload_pq(p_hwfn);
+
+ /* done sharing vports */
+ ecore_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ ecore_init_qm_vf_pqs(p_hwfn);
+}
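The ordering fixed by ecore_init_qm_pq_params() is significant: rate-limited PQs first (a firmware assumption), then the per-TC PQs, then the single-purpose PQs that share one vport (LB, OOO, pure-ACK, offload), and the per-VF PQs last. For a hypothetical PF with 2 PF RLs, 4 TCs, LB and offload PQs enabled and 2 VFs, the resulting layout would be the one printed by this small sketch:

#include <stdio.h>

/* illustrative only: the PQ layout the ordering above would produce for
 * 2 PF RLs, 4 TCs, LB + offload PQs and 2 VFs (OOO/ACK disabled) */
int main(void)
{
	const char *layout[] = { "rl0", "rl1", "tc0", "tc1", "tc2", "tc3",
				 "lb", "ofld", "vf0", "vf1" };
	unsigned int i;

	for (i = 0; i < sizeof(layout) / sizeof(layout[0]); i++)
		printf("pq %u -> %s\n", i, layout[i]);
	return 0;
}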
+
+/* compare values of getters against resources amounts */
+static enum _ecore_status_t ecore_init_qm_sanity(struct ecore_hwfn *p_hwfn)
+{
+ if (ecore_init_qm_get_num_vports(p_hwfn) >
+ RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return ECORE_INVAL;
+ }
+
+ if (ecore_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, ECORE_PQ)) {
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return ECORE_INVAL;
+ }
return ECORE_SUCCESS;
+}
- alloc_err:
- DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
- ecore_qm_info_free(p_hwfn);
- return ECORE_NOMEM;
+/*
+ * Function for verbose printing of the qm initialization results
+ */
+static void ecore_dp_init_qm_params(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d,"
+ " pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq, qm_info->start_vport, qm_info->pure_lb_pq,
+ qm_info->offload_pq, qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d,"
+ " num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq, qm_info->first_vf_pq, qm_info->num_pqs,
+ qm_info->num_vf_pqs, qm_info->num_vports,
+ qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d,"
+ " pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en, qm_info->pf_wfq_en, qm_info->vport_rl_en,
+ qm_info->vport_wfq_en, qm_info->pf_wfq, qm_info->pf_rl,
+ qm_info->num_pf_rls, ecore_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->p_dev->num_ports_in_engines; i++) {
+ port = &qm_info->qm_port_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d,"
+ " num_pbf_cmd_lines %d, num_btb_blocks %d,"
+ " reserved %d\n",
+ i, port->active, port->active_phys_tcs,
+ port->num_pbf_cmd_lines, port->num_btb_blocks,
+ port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &qm_info->qm_vport_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "vport idx %d, vport_rl %d, wfq %d,"
+ " first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->vport_rl,
+ vport->vport_wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "%d ",
+ vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &qm_info->qm_pq_params[i];
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "pq idx %d, vport_id %d, tc %d, wrr_grp %d,"
+ " rl_valid %d\n",
+ qm_info->start_pq + i, pq->vport_id, pq->tc_id,
+ pq->wrr_group, pq->rl_valid);
+ }
+}
+
+static void ecore_init_qm_info(struct ecore_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ ecore_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ ecore_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ ecore_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ ecore_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ ecore_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ ecore_dp_init_qm_params(p_hwfn);
}
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -478,17 +762,8 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
bool b_rc;
enum _ecore_status_t rc;
- /* qm_info is allocated in ecore_init_qm_info() which is already called
- * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
- * The allocated size may change each init, so we free it before next
- * allocation.
- */
- ecore_qm_info_free(p_hwfn);
-
/* initialize ecore's qm data structure */
- rc = ecore_init_qm_info(p_hwfn, false);
- if (rc != ECORE_SUCCESS)
- return rc;
+ ecore_init_qm_info(p_hwfn);
/* stop PF's qm queues */
OSAL_SPIN_LOCK(&qm_lock);
@@ -521,51 +796,67 @@ enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_alloc_qm_data(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
+ enum _ecore_status_t rc;
+
+ rc = ecore_init_qm_sanity(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_pq_params) *
+ ecore_init_qm_get_num_pqs(p_hwfn));
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_vport_params) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct init_qm_port_params) *
+ p_hwfn->p_dev->num_ports_in_engines);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+ sizeof(struct ecore_wfq_data) *
+ ecore_init_qm_get_num_vports(p_hwfn));
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return ECORE_SUCCESS;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
+ ecore_qm_info_free(p_hwfn);
+ return ECORE_NOMEM;
+}
+/******************** End QM initialization ***************/
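ecore_alloc_qm_data() sizes each array from the getters and unwinds through a single alloc_err label on any failure, which is also why a later re-init (e.g. a DCBX-triggered ecore_qm_reconf()) can reuse the buffers: the amounts do not change. A minimal sketch of the same allocate-or-unwind pattern in plain C, with placeholder names rather than the driver structures:

#include <stdlib.h>

struct buffers {
	void *pqs;
	void *vports;
	void *ports;
};

static int buffers_alloc(struct buffers *b, size_t n_pqs, size_t n_vports,
			 size_t n_ports)
{
	*b = (struct buffers){0}; /* so the unwind path can free() safely */

	b->pqs = calloc(n_pqs, 16);
	if (!b->pqs)
		goto err;
	b->vports = calloc(n_vports, 16);
	if (!b->vports)
		goto err;
	b->ports = calloc(n_ports, 16);
	if (!b->ports)
		goto err;
	return 0;

err:
	/* free() accepts NULL, so one common unwind path is enough */
	free(b->pqs);
	free(b->vports);
	free(b->ports);
	return -1;
}

int main(void)
{
	struct buffers b;

	if (buffers_alloc(&b, 8, 4, 2))
		return 1;
	free(b.pqs);
	free(b.vports);
	free(b.ports);
	return 0;
}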
+
enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
{
- struct ecore_consq *p_consq;
- struct ecore_eq *p_eq;
-#ifdef CONFIG_ECORE_LL2
- struct ecore_ll2_info *p_ll2_info;
-#endif
enum _ecore_status_t rc = ECORE_SUCCESS;
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i) {
+ rc = ecore_l2_alloc(&p_dev->hwfns[i]);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ }
return rc;
+ }
p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
sizeof(*p_dev->fw_data));
if (!p_dev->fw_data)
return ECORE_NOMEM;
- /* Allocate Memory for the Queue->CID mapping */
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- /* @@@TMP - resc management, change to actual required size */
- int tx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
- int rx_size = sizeof(struct ecore_hw_cid_data) *
- RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
-
- p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- tx_size);
- if (!p_hwfn->p_tx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Tx Cids\n");
- goto alloc_no_mem;
- }
-
- p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
- rx_size);
- if (!p_hwfn->p_rx_cids) {
- DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for Rx Cids\n");
- goto alloc_no_mem;
- }
- }
-
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
u32 n_eqes, num_cons;
@@ -582,11 +873,13 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
if (rc)
goto alloc_err;
- /* Prepare and process QM requirements */
- rc = ecore_init_qm_info(p_hwfn, true);
+ rc = ecore_alloc_qm_data(p_hwfn);
if (rc)
goto alloc_err;
+ /* init qm info */
+ ecore_init_qm_info(p_hwfn);
+
/* Compute the ILT client partition */
rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
if (rc)
@@ -618,8 +911,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
/* EQ */
n_eqes = ecore_chain_get_capacity(&p_hwfn->p_spq->chain);
- if ((p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) ||
- (p_hwfn->hw_info.personality == ECORE_PCI_IWARP)) {
+ if (ECORE_IS_RDMA_PERSONALITY(p_hwfn)) {
/* Calculate the EQ size
* ---------------------
* Each ICID may generate up to one event at a time i.e.
@@ -629,37 +921,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
* worst case:
* - Core - according to SPQ.
* - RoCE - per QP there are a couple of ICIDs, one
- * responder and one requester, each can
- * generate an EQE => n_eqes_qp = 2 * n_qp.
- * Each CQ can generate an EQE. There are 2 CQs
- * per QP => n_eqes_cq = 2 * n_qp.
- * Hence the RoCE total is 4 * n_qp or
- * 2 * num_cons.
+ * responder and one requester, each can
+ * generate an EQE => n_eqes_qp = 2 * n_qp.
+ * Each CQ can generate an EQE. There are 2 CQs
+ * per QP => n_eqes_cq = 2 * n_qp.
+ * Hence the RoCE total is 4 * n_qp or
+ * 2 * num_cons.
* - ENet - There can be up to two events per VF. One
- * for VF-PF channel and another for VF FLR
- * initial cleanup. The number of VFs is
- * bounded by MAX_NUM_VFS_BB, and is much
- * smaller than RoCE's so we avoid exact
- * calculation.
+ * for VF-PF channel and another for VF FLR
+ * initial cleanup. The number of VFs is
+ * bounded by MAX_NUM_VFS_BB, and is much
+ * smaller than RoCE's so we avoid exact
+ * calculation.
*/
- if (p_hwfn->hw_info.personality == ECORE_PCI_ETH_ROCE) {
+ if (ECORE_IS_ROCE_PERSONALITY(p_hwfn)) {
num_cons =
ecore_cxt_get_proto_cid_count(
p_hwfn,
PROTOCOLID_ROCE,
- 0);
+ OSAL_NULL);
num_cons *= 2;
} else {
num_cons = ecore_cxt_get_proto_cid_count(
p_hwfn,
PROTOCOLID_IWARP,
- 0);
+ OSAL_NULL);
}
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
} else if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) {
num_cons =
ecore_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI, 0);
+ PROTOCOLID_ISCSI,
+ OSAL_NULL);
n_eqes += 2 * num_cons;
}
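The comment above sizes the event queue: for RoCE each QP has a requester and a responder ICID plus two CQs, so up to 4 * n_qp (equivalently 2 * num_cons) EQEs, plus two events per VF for the VF-PF channel and FLR cleanup. A small worked computation with made-up counts (the SPQ capacity, QP count and VF limit are assumptions):

#include <stdio.h>

#define MAX_VFS_EXAMPLE 240 /* stand-in for MAX_NUM_VFS_BB */

int main(void)
{
	unsigned int n_qp = 1024;             /* hypothetical RoCE QP count */
	unsigned int num_cons = 2 * n_qp;     /* requester + responder ICIDs */
	unsigned int n_eqes = 64;             /* assumed SPQ chain capacity */

	/* each ICID and each of the two CQs per QP may post an EQE:
	 * 4 * n_qp == 2 * num_cons, plus two events per VF (VF-PF channel
	 * and FLR cleanup) */
	n_eqes += 2 * num_cons + 2 * MAX_VFS_EXAMPLE;

	printf("EQ elements needed: %u\n", n_eqes); /* 64 + 4096 + 480 = 4640 */
	return 0;
}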
@@ -667,33 +960,27 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
DP_ERR(p_hwfn, "Cannot allocate 0x%x EQ elements."
"The maximum of a u16 chain is 0x%x\n",
n_eqes, 0xFFFF);
- goto alloc_err;
+ goto alloc_no_mem;
}
- p_eq = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
- if (!p_eq)
- goto alloc_no_mem;
- p_hwfn->p_eq = p_eq;
+ rc = ecore_eq_alloc(p_hwfn, (u16)n_eqes);
+ if (rc)
+ goto alloc_err;
- p_consq = ecore_consq_alloc(p_hwfn);
- if (!p_consq)
- goto alloc_no_mem;
- p_hwfn->p_consq = p_consq;
-
-#ifdef CONFIG_ECORE_LL2
- if (p_hwfn->using_ll2) {
- p_ll2_info = ecore_ll2_alloc(p_hwfn);
- if (!p_ll2_info)
- goto alloc_no_mem;
- p_hwfn->p_ll2_info = p_ll2_info;
- }
-#endif
+ rc = ecore_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = ecore_l2_alloc(p_hwfn);
+ if (rc != ECORE_SUCCESS)
+ goto alloc_err;
/* DMA info initialization */
rc = ecore_dmae_info_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true,
- "Failed to allocate memory for dmae_info structure\n");
+ "Failed to allocate memory for dmae_info"
+ " structure\n");
goto alloc_err;
}
@@ -707,7 +994,7 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
}
p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
- sizeof(struct ecore_eth_stats));
+ sizeof(*p_dev->reset_stats));
if (!p_dev->reset_stats) {
DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
goto alloc_no_mem;
@@ -715,9 +1002,9 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
return ECORE_SUCCESS;
- alloc_no_mem:
+alloc_no_mem:
rc = ECORE_NOMEM;
- alloc_err:
+alloc_err:
ecore_resc_free(p_dev);
return rc;
}
@@ -726,16 +1013,19 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
{
int i;
- if (IS_VF(p_dev))
+ if (IS_VF(p_dev)) {
+ for_each_hwfn(p_dev, i)
+ ecore_l2_setup(&p_dev->hwfns[i]);
return;
+ }
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
ecore_cxt_mngr_setup(p_hwfn);
ecore_spq_setup(p_hwfn);
- ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
- ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
+ ecore_eq_setup(p_hwfn);
+ ecore_consq_setup(p_hwfn);
/* Read shadow of current MFW mailbox */
ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
@@ -745,11 +1035,8 @@ void ecore_resc_setup(struct ecore_dev *p_dev)
ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+ ecore_l2_setup(p_hwfn);
ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
-#ifdef CONFIG_ECORE_LL2
- if (p_hwfn->using_ll2)
- ecore_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
-#endif
}
}
@@ -794,10 +1081,9 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Sending final cleanup for PFVF[%d] [Command %08x\n]",
- id, OSAL_CPU_TO_LE32(command));
+ id, command);
- ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
- OSAL_CPU_TO_LE32(command));
+ ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
/* Poll until completion */
while (!REG_RD(p_hwfn, addr) && count--)
@@ -819,10 +1105,8 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
{
int hw_mode = 0;
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- hw_mode |= 1 << MODE_BB_A0;
- } else if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
- hw_mode |= 1 << MODE_BB_B0;
+ if (ECORE_IS_BB_B0(p_hwfn->p_dev)) {
+ hw_mode |= 1 << MODE_BB;
} else if (ECORE_IS_AH(p_hwfn->p_dev)) {
hw_mode |= 1 << MODE_K2;
} else {
@@ -877,11 +1161,6 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
#endif
hw_mode |= 1 << MODE_ASIC;
-#ifndef REAL_ASIC_ONLY
- if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
- hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
-#endif
-
if (p_hwfn->p_dev->num_hwfns > 1)
hw_mode |= 1 << MODE_100G;
@@ -899,29 +1178,36 @@ static enum _ecore_status_t ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
static enum _ecore_status_t ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
+ struct ecore_dev *p_dev = p_hwfn->p_dev;
u32 pl_hv = 1;
int i;
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- pl_hv |= 0x600;
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev))
+ pl_hv |= 0x600;
+ }
ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
+ if (CHIP_REV_IS_EMUL(p_dev) &&
+ (ECORE_IS_AH(p_dev)))
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2_K2_E5,
+ 0x3ffffff);
/* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
/* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
- if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
+ if (!CHIP_REV_IS_EMUL(p_dev) || ECORE_IS_BB(p_dev))
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB, 4);
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
- /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
- (p_hwfn->p_dev->num_ports_in_engines >> 1));
+ if (CHIP_REV_IS_EMUL(p_dev)) {
+ if (ECORE_IS_AH(p_dev)) {
+ /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
+ (p_dev->num_ports_in_engines >> 1));
- ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
- p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
+ p_dev->num_ports_in_engines == 4 ? 0 : 3);
+ }
}
/* Poll on RBC */
@@ -987,7 +1273,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_gtt_init(p_hwfn);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (CHIP_REV_IS_EMUL(p_dev)) {
rc = ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1002,7 +1288,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
}
ecore_qm_common_rt_init(p_hwfn,
- p_hwfn->p_dev->num_ports_in_engines,
+ p_dev->num_ports_in_engines,
qm_info->max_phys_tcs_per_port,
qm_info->pf_rl_en, qm_info->pf_wfq_en,
qm_info->vport_rl_en, qm_info->vport_wfq_en,
@@ -1010,18 +1296,6 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_cxt_hw_init_common(p_hwfn);
- /* Close gate from NIG to BRB/Storm; By default they are open, but
- * we close them to prevent NIG from passing data to reset blocks.
- * Should have been done in the ENGINE phase, but init-tool lacks
- * proper port-pretend capabilities.
- */
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
- ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
- ecore_port_unpretend(p_hwfn, p_ptt);
-
rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc != ECORE_SUCCESS)
return rc;
@@ -1032,11 +1306,11 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
- if (ECORE_IS_BB(p_hwfn->p_dev)) {
+ if (ECORE_IS_BB(p_dev)) {
/* Workaround clears ROCE search for all functions to prevent
* involving non initialized function in processing ROCE packet.
*/
- num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
+ num_pfs = NUM_OF_ENG_PFS(p_dev);
for (pf_id = 0; pf_id < num_pfs; pf_id++) {
ecore_fid_pretend(p_hwfn, p_ptt, pf_id);
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
@@ -1052,8 +1326,7 @@ static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
* This is not done inside the init tool since it currently can't
* perform a pretending to VFs.
*/
- max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
- : MAX_NUM_VFS_BB;
+ max_num_vfs = ECORE_IS_AH(p_dev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
@@ -1080,20 +1353,19 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
{
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
- ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
+ ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) |
(8 << PMEG_IF_BYTE_COUNT),
(reg_type << 25) | (addr << 8) | port,
(u32)((data >> 32) & 0xffffffff),
(u32)(data & 0xffffffff));
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
- (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB,
+ (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB) &
0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB,
(reg_type << 25) | (addr << 8) | port);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
- data & 0xffffffff);
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB, data & 0xffffffff);
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB,
(data >> 32) & 0xffffffff);
}
@@ -1109,48 +1381,13 @@ static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
#define XLMAC_PAUSE_CTRL (0x60d)
#define XLMAC_PFC_CTRL (0x60e)
-static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
+static void ecore_emul_link_init_bb(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u8 port = p_hwfn->port_id;
- u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
-
- ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
- (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
- (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
- | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
- 1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
- 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
- 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
- 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
- (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
- (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
-
- ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
-}
-
-static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
u8 loopback = 0, port = p_hwfn->port_id * 2;
DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port);
- if (ECORE_IS_AH(p_hwfn->p_dev)) {
- ecore_emul_link_init_ah(p_hwfn, p_ptt);
- return;
- }
-
/* XLPORT MAC MODE *//* 0 Quad, 4 Single... */
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
port);
@@ -1179,8 +1416,53 @@ static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
}
-static void ecore_link_init(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u8 port)
+static void ecore_emul_link_init_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u8 port = p_hwfn->port_id;
+ u32 mac_base = NWM_REG_MAC0_K2_E5 + (port << 2) * NWM_REG_MAC0_SIZE;
+
+ DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
+
+ ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5 + (port << 2),
+ (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
+ (port <<
+ CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
+ (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE_K2_E5,
+ 1 << ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH_K2_E5,
+ 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH_K2_E5,
+ 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5,
+ 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT);
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5,
+ (0xA <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT) |
+ (8 <<
+ ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT));
+
+ ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG_K2_E5,
+ 0xa853);
+}
+
+static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ ecore_emul_link_init_ah_e5(p_hwfn, p_ptt);
+ else /* BB */
+ ecore_emul_link_init_bb(p_hwfn, p_ptt);
+}
+
+static void ecore_link_init_bb(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 port)
{
int port_offset = port ? 0x800 : 0;
u32 xmac_rxctrl = 0;
@@ -1193,10 +1475,10 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE_BB, 1);
/* Set the number of ports on the Warp Core to 10G */
- ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE_BB, 3);
/* Soft reset of XMAC */
ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
@@ -1207,70 +1489,24 @@ static void ecore_link_init(struct ecore_hwfn *p_hwfn,
/* FIXME: move to common end */
if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE_BB + port_offset, 0x20);
/* Set Max packet size: initialize XMAC block register for port 0 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE_BB + port_offset, 0x2710);
/* CRC append for Tx packets: init XMAC block register for port 1 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO_BB + port_offset, 0xC800);
/* Enable TX and RX: initialize XMAC block register for port 1 */
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
- XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
- xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
- xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
- ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL_BB + port_offset,
+ XMAC_REG_CTRL_TX_EN_BB | XMAC_REG_CTRL_RX_EN_BB);
+ xmac_rxctrl = ecore_rd(p_hwfn, p_ptt,
+ XMAC_REG_RX_CTRL_BB + port_offset);
+ xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB;
+ ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL_BB + port_offset, xmac_rxctrl);
}
#endif
-static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- int hw_mode)
-{
- enum _ecore_status_t rc = ECORE_SUCCESS;
-
- rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
- hw_mode);
- if (rc != ECORE_SUCCESS)
- return rc;
-#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
- return ECORE_SUCCESS;
-
- if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
- if (ECORE_IS_AH(p_hwfn->p_dev))
- return ECORE_SUCCESS;
- ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
- } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- if (p_hwfn->p_dev->num_hwfns > 1) {
- /* Activate OPTE in CMT */
- u32 val;
-
- val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
- val |= 0x10;
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
- ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
- ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
- 0x55555555);
- }
-
- ecore_emul_link_init(p_hwfn, p_ptt);
- } else {
- DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
- }
-#endif
-
- return rc;
-}
-
static enum _ecore_status_t
ecore_hw_init_dpi_size(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
@@ -1339,7 +1575,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
u32 db_bar_size, n_cpus;
u32 roce_edpm_mode;
u32 pf_dems_shift;
- int rc = ECORE_SUCCESS;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
u8 cond;
db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
@@ -1394,8 +1630,9 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
rc = ecore_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
}
- cond = ((rc) && (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
- (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
+ cond = ((rc != ECORE_SUCCESS) &&
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == ECORE_ROCE_EDPM_MODE_DISABLE);
if (cond || p_hwfn->dcbx_no_edpm) {
/* Either EDPM is disabled from user configuration, or it is
* disabled via DCBx, or it is not mandatory and we failed to
@@ -1419,7 +1656,7 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
"disabled" : "enabled");
/* Check return codes from above calls */
- if (rc) {
+ if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
"Failed to allocate enough DPIs\n");
return ECORE_NORESOURCES;
@@ -1437,10 +1674,58 @@ ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ int hw_mode)
+{
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+
+ rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
+ hw_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+
+ if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+ if (ECORE_IS_AH(p_hwfn->p_dev))
+ return ECORE_SUCCESS;
+ else if (ECORE_IS_BB(p_hwfn->p_dev))
+ ecore_link_init_bb(p_hwfn, p_ptt, p_hwfn->port_id);
+ } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
+ if (p_hwfn->p_dev->num_hwfns > 1) {
+ /* Activate OPTE in CMT */
+ u32 val;
+
+ val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
+ val |= 0x10;
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
+ ecore_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
+ 0x55555555);
+ }
+
+ ecore_emul_link_init(p_hwfn, p_ptt);
+ } else {
+ DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
+ }
+#endif
+
+ return rc;
+}
+
static enum _ecore_status_t
ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
int hw_mode,
bool b_hw_start,
enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
@@ -1532,7 +1817,9 @@ ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
return rc;
if (b_hw_start) {
/* enable interrupts */
- ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ rc = ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
+ if (rc != ECORE_SUCCESS)
+ return rc;
/* send function start command */
rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
@@ -1618,14 +1905,31 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
p_hwfn->mcp_info->mfw_mb_length);
}
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
+ ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = 1;
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
struct ecore_hw_init_params *p_params)
{
- enum _ecore_status_t rc, mfw_rc;
- u32 load_code, param;
- int i, j;
+ struct ecore_load_req_params load_req_params;
+ u32 load_code, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+ int i;
- if (p_params->int_mode == ECORE_INT_MODE_MSI && p_dev->num_hwfns > 1) {
+ if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
+ (p_dev->num_hwfns > 1)) {
DP_NOTICE(p_dev, false,
"MSI mode is not supported for CMT devices\n");
return ECORE_INVAL;
@@ -1640,8 +1944,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
if (IS_VF(p_dev)) {
- p_hwfn->b_int_enabled = 1;
+ ecore_vf_start(p_hwfn, p_params);
continue;
}
@@ -1654,33 +1964,37 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
if (rc != ECORE_SUCCESS)
return rc;
- /* @@@TBD need to add here:
- * Check for fan failure
- * Prev_unload
- */
- rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
- if (rc) {
+ OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
+ load_req_params.drv_role = p_params->is_crash_kernel ?
+ ECORE_DRV_ROLE_KDUMP :
+ ECORE_DRV_ROLE_OS;
+ load_req_params.timeout_val = p_params->mfw_timeout_val;
+ load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+ rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed sending LOAD_REQ command\n");
+ "Failed sending a LOAD_REQ command\n");
return rc;
}
+ load_code = load_req_params.load_code;
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
/* CQ75580:
* When coming back from hiberbate state, the registers from
* which shadow is read initially are not initialized. It turns
* out that these registers get initialized during the call to
* ecore_mcp_load_req request. So we need to reread them here
* to get the proper shadow register value.
- * Note: This is a workaround for the missinginig MFW
+ * Note: This is a workaround for the missing MFW
* initialization. It may be removed once the implementation
* is done.
*/
ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
- rc, load_code);
-
/* Only relevant for recovery:
* Clear the indication after the LOAD_REQ command is responded
* by the MFW.
@@ -1699,33 +2013,14 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
- if (rc)
+ if (rc != ECORE_SUCCESS)
break;
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_PORT:
rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
p_hwfn->hw_info.hw_mode);
- if (rc)
+ if (rc != ECORE_SUCCESS)
break;
-
-#ifndef REAL_ASIC_ONLY
- if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
- struct init_nig_pri_tc_map_req tc_map;
-
- OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
-
- /* remove this once flow control is
- * implemented
- */
- for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
- tc_map.pri[j].tc_id = 0;
- tc_map.pri[j].valid = 1;
- }
- ecore_init_nig_pri_tc_map(p_hwfn,
- p_hwfn->p_main_ptt,
- &tc_map);
- }
-#endif
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
@@ -1736,6 +2031,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_params->allow_npar_tx_switch);
break;
default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected load code [0x%08x]", load_code);
rc = ECORE_NOTIMPL;
break;
}
@@ -1751,16 +2048,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
0, &load_code, &param);
if (rc != ECORE_SUCCESS)
return rc;
+
if (mfw_rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed sending LOAD_DONE command\n");
+ "Failed sending a LOAD_DONE command\n");
return mfw_rc;
}
- ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt);
- ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
- p_params->epoch);
-
/* send DCBX attention request command */
DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
"sending phony dcbx set command to trigger DCBx attention handling\n");
@@ -1777,7 +2071,29 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
p_hwfn->hw_init_done = true;
}
- return ECORE_SUCCESS;
+ if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ drv_mb_param = STORM_FW_VERSION;
+ rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &load_code, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu)
+ rc = ecore_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update default mtu\n");
+
+ rc = ecore_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_DISABLED);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+ }
+
+ return rc;
}
#define ECORE_HW_STOP_RETRY_LIMIT (10)
@@ -1802,13 +2118,14 @@ static void ecore_hw_timers_stop(struct ecore_dev *p_dev,
*/
OSAL_MSLEEP(1);
}
- if (i == ECORE_HW_STOP_RETRY_LIMIT)
- DP_NOTICE(p_hwfn, true,
- "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
- (u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_CONN),
- (u8)ecore_rd(p_hwfn, p_ptt,
- TM_REG_PF_SCAN_ACTIVE_TASK));
+
+ if (i < ECORE_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn, true, "Timers linear scans are not over"
+ " [Connection %02x Tasks %02x]\n",
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}
void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
@@ -1823,32 +2140,77 @@ void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
}
}
+static enum _ecore_status_t ecore_verify_reg_val(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 addr, u32 expected_val)
+{
+ u32 val = ecore_rd(p_hwfn, p_ptt, addr);
+
+ if (val != expected_val) {
+ DP_NOTICE(p_hwfn, true,
+ "Value at address 0x%08x is 0x%08x while the expected value is 0x%08x\n",
+ addr, val, expected_val);
+ return ECORE_UNKNOWN_ERROR;
+ }
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
{
- enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
+ struct ecore_hwfn *p_hwfn;
+ struct ecore_ptt *p_ptt;
+ enum _ecore_status_t rc, rc2 = ECORE_SUCCESS;
int j;
for_each_hwfn(p_dev, j) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
- struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
+ p_hwfn = &p_dev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(p_dev)) {
ecore_vf_pf_int_cleanup(p_hwfn);
+ rc = ecore_vf_pf_reset(p_hwfn);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
+ /* Send unload command to MCP */
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+
+ OSAL_DPC_SYNC(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. this prevents
+ * a race between pf stop and dcbx pf update.
+ */
+
rc = ecore_sp_pf_stop(p_hwfn);
- if (rc)
+ if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
/* perform debug action after PF stop was sent */
- OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
+ OSAL_AFTER_PF_STOP((void *)p_dev, p_hwfn->my_id);
/* close NIG to BRB gate */
ecore_wr(p_hwfn, p_ptt,
@@ -1875,20 +2237,48 @@ enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
/* Need to wait 1ms to guarantee SBs are cleared */
OSAL_MSLEEP(1);
- }
+
+ if (!p_dev->recov_in_prog) {
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_TX, 0);
+ ecore_verify_reg_val(p_hwfn, p_ptt,
+ QM_REG_USG_CNT_PF_OTHER, 0);
+ /* @@@TBD - assert on incorrect xCFC values (10.b) */
+ }
+
+ /* Disable PF in HW blocks */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ ecore_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ if (!p_dev->recov_in_prog) {
+ rc = ecore_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
+ }
+ } /* hwfn loop */
if (IS_PF(p_dev)) {
+ p_hwfn = ECORE_LEADING_HWFN(p_dev);
+ p_ptt = ECORE_LEADING_HWFN(p_dev)->p_main_ptt;
+
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
- t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
- p_dev->hwfns[0].p_main_ptt, false);
- if (t_rc != ECORE_SUCCESS)
- rc = t_rc;
+ rc = ecore_change_pci_hwfn(p_hwfn, p_ptt, false);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "ecore_change_pci_hwfn failed. rc = %d.\n",
+ rc);
+ rc2 = ECORE_UNKNOWN_ERROR;
+ }
}
- return rc;
+ return rc2;
}
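ecore_hw_stop() now keeps iterating over the hwfns when a step fails, recording the failure in rc2 while rc tracks each individual call, and returns the aggregate at the end. The same continue-on-error pattern in a standalone sketch:

#include <stdio.h>

/* pretend teardown step: step 1 fails, the others succeed */
static int stop_step(int id)
{
	return id == 1 ? -1 : 0;
}

int main(void)
{
	int i, rc, rc2 = 0;

	for (i = 0; i < 3; i++) {
		rc = stop_step(i);
		if (rc) {
			fprintf(stderr, "step %d failed (%d), continuing\n",
				i, rc);
			rc2 = rc; /* remember the failure, don't abort */
		}
	}
	return rc2 ? 1 : 0;
}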
void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
@@ -1949,84 +2339,6 @@ void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
}
-static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt, u32 reg,
- bool expected)
-{
- u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
-
- if (assert_val != expected) {
- DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
- reg, expected);
- return ECORE_UNKNOWN_ERROR;
- }
-
- return 0;
-}
-
-enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
-{
- enum _ecore_status_t rc = ECORE_SUCCESS;
- u32 unload_resp, unload_param;
- int i;
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- if (IS_VF(p_dev)) {
- rc = ecore_vf_pf_reset(p_hwfn);
- if (rc)
- return rc;
- continue;
- }
-
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
-
- /* Check for incorrect states */
- if (!p_dev->recov_in_prog) {
- ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_TX, 0);
- ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
- QM_REG_USG_CNT_PF_OTHER, 0);
- /* @@@TBD - assert on incorrect xCFC values (10.b) */
- }
-
- /* Disable PF in HW blocks */
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
-
- if (p_dev->recov_in_prog) {
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
- "Recovery is in progress -> skip sending unload_req/done\n");
- break;
- }
-
- /* Send unload command to MCP */
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_REQ,
- DRV_MB_PARAM_UNLOAD_WOL_MCP,
- &unload_resp, &unload_param);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true,
- "ecore_hw_reset: UNLOAD_REQ failed\n");
- /* @@TBD - what to do? for now, assume ENG. */
- unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
- }
-
- rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
- DRV_MSG_CODE_UNLOAD_DONE,
- 0, &unload_resp, &unload_param);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn,
- true, "ecore_hw_reset: UNLOAD_DONE failed\n");
- /* @@@TBD - Should it really ASSERT here ? */
- return rc;
- }
- }
-
- return rc;
-}
-
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
{
@@ -2040,22 +2352,22 @@ static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
/* clear indirect access */
if (ECORE_IS_AH(p_hwfn->p_dev)) {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_E8_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_EC_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F0_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_F4_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5, 0);
} else {
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_88_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_90_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_PGL_ADDR_94_F0, 0);
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
}
/* Clean Previous errors if such exist */
@@ -2090,6 +2402,7 @@ static void get_function_id(struct ecore_hwfn *p_hwfn)
static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
{
u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct ecore_sb_cnt_info sb_cnt_info;
int num_features = 1;
/* L2 Queues require each: 1 status block. 1 L2 queue */
@@ -2098,145 +2411,226 @@ static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
RESC_NUM(p_hwfn, ECORE_SB) / num_features,
RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+ OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
+ ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
+ feat_num[ECORE_VF_L2_QUE] =
+ OSAL_MIN_T(u32,
+ RESC_NUM(p_hwfn, ECORE_L2_QUEUE) -
+ FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ sb_cnt_info.sb_iov_cnt);
+
+ feat_num[ECORE_FCOE_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+ feat_num[ECORE_ISCSI_CQ] = OSAL_MIN_T(u32, RESC_NUM(p_hwfn, ECORE_SB),
+ RESC_NUM(p_hwfn, ECORE_CMDQS_CQS));
+
DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
- "#PF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
- feat_num[ECORE_PF_L2_QUE],
- feat_num[ECORE_RDMA_CNQ],
- RESC_NUM(p_hwfn, ECORE_SB), num_features);
+ "#PF_L2_QUEUE=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #FCOE_CQ=%d #ISCSI_CQ=%d #SB=%d\n",
+ (int)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, ECORE_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_FCOE_CQ),
+ (int)FEAT_NUM(p_hwfn, ECORE_ISCSI_CQ),
+ RESC_NUM(p_hwfn, ECORE_SB));
}
-static enum resource_id_enum
-ecore_hw_get_mfw_res_id(enum ecore_resources res_id)
+const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
{
- enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
-
switch (res_id) {
case ECORE_SB:
- mfw_res_id = RESOURCE_NUM_SB_E;
- break;
+ return "SB";
case ECORE_L2_QUEUE:
- mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
- break;
+ return "L2_QUEUE";
case ECORE_VPORT:
- mfw_res_id = RESOURCE_NUM_VPORT_E;
- break;
+ return "VPORT";
case ECORE_RSS_ENG:
- mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
- break;
+ return "RSS_ENG";
case ECORE_PQ:
- mfw_res_id = RESOURCE_NUM_PQ_E;
- break;
+ return "PQ";
case ECORE_RL:
- mfw_res_id = RESOURCE_NUM_RL_E;
- break;
+ return "RL";
case ECORE_MAC:
+ return "MAC";
case ECORE_VLAN:
- /* Each VFC resource can accommodate both a MAC and a VLAN */
- mfw_res_id = RESOURCE_VFC_FILTER_E;
- break;
+ return "VLAN";
+ case ECORE_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
case ECORE_ILT:
- mfw_res_id = RESOURCE_ILT_E;
- break;
+ return "ILT";
case ECORE_LL2_QUEUE:
- mfw_res_id = RESOURCE_LL2_QUEUE_E;
- break;
- case ECORE_RDMA_CNQ_RAM:
+ return "LL2_QUEUE";
case ECORE_CMDQS_CQS:
- /* CNQ/CMDQS are the same resource */
- mfw_res_id = RESOURCE_CQS_E;
- break;
+ return "CMDQS_CQS";
case ECORE_RDMA_STATS_QUEUE:
- mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
- break;
+ return "RDMA_STATS_QUEUE";
+ case ECORE_BDQ:
+ return "BDQ";
default:
- break;
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static enum _ecore_status_t
+__ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_set_resc_max_val(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t
+ecore_hw_set_soft_resc_size(struct ecore_hwfn *p_hwfn)
+{
+ bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ enum _ecore_status_t rc;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ /* @DPDK */
+ switch (res_id) {
+ case ECORE_LL2_QUEUE:
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_RDMA_STATS_QUEUE:
+ case ECORE_BDQ:
+ resc_max_val = 0;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __ecore_hw_set_soft_resc_size(p_hwfn, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* There's no point to continue to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return ECORE_NOTIMPL;
}
- return mfw_res_id;
+ return ECORE_SUCCESS;
}
-static u32 ecore_hw_get_dflt_resc_num(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id)
+static
+enum _ecore_status_t ecore_hw_get_dflt_resc(struct ecore_hwfn *p_hwfn,
+ enum ecore_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
{
u8 num_funcs = p_hwfn->num_funcs_on_engine;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
struct ecore_sb_cnt_info sb_cnt_info;
- u32 dflt_resc_num = 0;
switch (res_id) {
case ECORE_SB:
OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
- dflt_resc_num = sb_cnt_info.sb_cnt;
+ *p_resc_num = sb_cnt_info.sb_cnt;
break;
case ECORE_L2_QUEUE:
- dflt_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
MAX_NUM_L2_QUEUES_BB) / num_funcs;
break;
case ECORE_VPORT:
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
break;
case ECORE_RSS_ENG:
- dflt_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
+ *p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
ETH_RSS_ENGINE_NUM_BB) / num_funcs;
break;
case ECORE_PQ:
- dflt_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
+ *p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
MAX_QM_TX_QUEUES_BB) / num_funcs;
break;
case ECORE_RL:
- dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
+ *p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
break;
case ECORE_MAC:
case ECORE_VLAN:
/* Each VFC resource can accommodate both a MAC and a VLAN */
- dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
break;
case ECORE_ILT:
- dflt_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
+ *p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
PXP_NUM_ILT_RECORDS_BB) / num_funcs;
break;
case ECORE_LL2_QUEUE:
- dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
+ *p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
break;
case ECORE_RDMA_CNQ_RAM:
case ECORE_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
/* @DPDK */
- dflt_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
+ *p_resc_num = (NUM_OF_GLOBAL_QUEUES / 2) / num_funcs;
break;
case ECORE_RDMA_STATS_QUEUE:
/* @DPDK */
- dflt_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
+ *p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
MAX_NUM_VPORTS_BB) / num_funcs;
break;
+ case ECORE_BDQ:
+ /* @DPDK */
+ *p_resc_num = 0;
+ break;
default:
break;
}
- return dflt_resc_num;
+
+ switch (res_id) {
+ case ECORE_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ break;
+ default:
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
+ }
+
+ return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
- enum ecore_resources res_id,
- bool drv_resc_alloc)
+static enum _ecore_status_t
+__ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn, enum ecore_resources res_id,
+ bool drv_resc_alloc)
{
- u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
- u32 *p_resc_num, *p_resc_start;
- struct resource_info resc_info;
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
enum _ecore_status_t rc;
p_resc_num = &RESC_NUM(p_hwfn, res_id);
p_resc_start = &RESC_START(p_hwfn, res_id);
- dflt_resc_num = ecore_hw_get_dflt_resc_num(p_hwfn, res_id);
- if (!dflt_resc_num) {
- DP_ERR(p_hwfn, "Failed to get default amount for resource %d\n",
- res_id);
- return ECORE_INVAL;
+ rc = ecore_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
+ return rc;
}
- dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
@@ -2246,21 +2640,13 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
}
#endif
- OSAL_MEM_ZERO(&resc_info, sizeof(resc_info));
- resc_info.res_id = ecore_hw_get_mfw_res_id(res_id);
- if (resc_info.res_id == RESOURCE_NUM_INVALID) {
- DP_ERR(p_hwfn,
- "Failed to match resource %d with MFW resources\n",
- res_id);
- return ECORE_INVAL;
- }
-
- rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
- &mcp_resp, &mcp_param);
+ rc = ecore_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true,
- "MFW resp failure for a resc alloc req [res_id %d]\n",
- res_id);
+ "MFW response failure for an allocation request for"
+ " resource %d [%s]\n",
+ res_id, ecore_hw_get_resc_name(res_id));
return rc;
}
@@ -2269,15 +2655,13 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
* - There is an internal error in the MFW while processing the request
* - The resource ID is unknown to the MFW
*/
- if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
- mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
- /* @DPDK */
- DP_INFO(p_hwfn,
- "No allocation info for resc %d [mcp_resp 0x%x].",
- res_id, mcp_resp);
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
DP_INFO(p_hwfn,
- "Applying default values [num %d, start %d].\n",
- dflt_resc_num, dflt_resc_start);
+ "Failed to receive allocation info for resource %d [%s]."
+ " mcp_resp = 0x%x. Applying default values"
+ " [%d,%d].\n",
+ res_id, ecore_hw_get_resc_name(res_id), mcp_resp,
+ dflt_resc_num, dflt_resc_start);
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
@@ -2287,71 +2671,51 @@ static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
/* TBD - remove this when revising the handling of the SB resource */
if (res_id == ECORE_SB) {
/* Excluding the slowpath SB */
- resc_info.size -= 1;
- resc_info.offset -= p_hwfn->enabled_func_idx;
+ *p_resc_num -= 1;
+ *p_resc_start -= p_hwfn->enabled_func_idx;
}
- *p_resc_num = resc_info.size;
- *p_resc_start = resc_info.offset;
-
if (*p_resc_num != dflt_resc_num || *p_resc_start != dflt_resc_start) {
- DP_NOTICE(p_hwfn, false,
- "Resource %d: MFW allocation [num %d, start %d]",
- res_id, *p_resc_num, *p_resc_start);
- DP_NOTICE(p_hwfn, false,
- "differs from default values [num %d, start %d]%s\n",
- dflt_resc_num,
- dflt_resc_start,
- drv_resc_alloc ? " - applying default values" : "");
+ DP_INFO(p_hwfn,
+ "MFW allocation for resource %d [%s] differs from default values [%d,%d vs. %d,%d]%s\n",
+ res_id, ecore_hw_get_resc_name(res_id), *p_resc_num,
+ *p_resc_start, dflt_resc_num, dflt_resc_start,
+ drv_resc_alloc ? " - Applying default values" : "");
if (drv_resc_alloc) {
*p_resc_num = dflt_resc_num;
*p_resc_start = dflt_resc_start;
}
}
- out:
+out:
return ECORE_SUCCESS;
}
-static const char *ecore_hw_get_resc_name(enum ecore_resources res_id)
+static enum _ecore_status_t ecore_hw_set_resc_info(struct ecore_hwfn *p_hwfn,
+ bool drv_resc_alloc)
{
- switch (res_id) {
- case ECORE_SB:
- return "SB";
- case ECORE_L2_QUEUE:
- return "L2_QUEUE";
- case ECORE_VPORT:
- return "VPORT";
- case ECORE_RSS_ENG:
- return "RSS_ENG";
- case ECORE_PQ:
- return "PQ";
- case ECORE_RL:
- return "RL";
- case ECORE_MAC:
- return "MAC";
- case ECORE_VLAN:
- return "VLAN";
- case ECORE_RDMA_CNQ_RAM:
- return "RDMA_CNQ_RAM";
- case ECORE_ILT:
- return "ILT";
- case ECORE_LL2_QUEUE:
- return "LL2_QUEUE";
- case ECORE_CMDQS_CQS:
- return "CMDQS_CQS";
- case ECORE_RDMA_STATS_QUEUE:
- return "RDMA_STATS_QUEUE";
- default:
- return "UNKNOWN_RESOURCE";
+ enum _ecore_status_t rc;
+ u8 res_id;
+
+ for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
+ rc = __ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ return rc;
}
+
+ return ECORE_SUCCESS;
}
+#define ECORE_RESC_ALLOC_LOCK_RETRY_CNT 10
+#define ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US 10000 /* 10 msec */
+
static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
bool drv_resc_alloc)
{
+ struct ecore_resc_unlock_params resc_unlock_params;
+ struct ecore_resc_lock_params resc_lock_params;
bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
- enum _ecore_status_t rc;
u8 res_id;
+ enum _ecore_status_t rc;
#ifndef ASIC_ONLY
u32 *resc_start = p_hwfn->hw_info.resc_start;
u32 *resc_num = p_hwfn->hw_info.resc_num;
@@ -2364,10 +2728,62 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
u32 roce_min_ilt_lines = PXP_NUM_ILT_RECORDS_BB / MAX_NUM_PFS_BB;
#endif
- for (res_id = 0; res_id < ECORE_MAX_RESC; res_id++) {
- rc = ecore_hw_set_resc_info(p_hwfn, res_id, drv_resc_alloc);
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+ * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ * Old drivers that don't acquire the lock can run in parallel, and
+ * their allocation values won't be affected by the updated max values.
+ */
+ OSAL_MEM_ZERO(&resc_lock_params, sizeof(resc_lock_params));
+ resc_lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+ resc_lock_params.retry_num = ECORE_RESC_ALLOC_LOCK_RETRY_CNT;
+ resc_lock_params.retry_interval = ECORE_RESC_ALLOC_LOCK_RETRY_INTVL_US;
+ resc_lock_params.sleep_b4_retry = true;
+ OSAL_MEM_ZERO(&resc_unlock_params, sizeof(resc_unlock_params));
+ resc_unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
+
+ rc = ecore_mcp_resc_lock(p_hwfn, p_hwfn->p_main_ptt, &resc_lock_params);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ return rc;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (rc == ECORE_SUCCESS && !resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ rc = ECORE_BUSY;
+ goto unlock_and_exit;
+ } else {
+ rc = ecore_hw_set_soft_resc_size(p_hwfn);
+ if (rc != ECORE_SUCCESS && rc != ECORE_NOTIMPL) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == ECORE_NOTIMPL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
+ if (rc != ECORE_SUCCESS)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = ecore_hw_set_resc_info(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt,
+ &resc_unlock_params);
if (rc != ECORE_SUCCESS)
- return rc;
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
}
#ifndef ASIC_ONLY
@@ -2420,14 +2836,21 @@ static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn,
RESC_START(p_hwfn, res_id));
return ECORE_SUCCESS;
+
+unlock_and_exit:
+ ecore_mcp_resc_unlock(p_hwfn, p_hwfn->p_main_ptt, &resc_unlock_params);
+ return rc;
}
-static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static enum _ecore_status_t
+ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_hw_prepare_params *p_params)
{
- u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg, dcbx_mode;
u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
struct ecore_mcp_link_params *link;
+ enum _ecore_status_t rc;
/* Read global nvm_cfg address */
nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -2435,6 +2858,8 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
/* Verify MCP has initialized it */
if (!nvm_cfg_addr) {
DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_NVM;
return ECORE_INVAL;
}
@@ -2474,6 +2899,9 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
break;
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X10G;
+ break;
case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
break;
@@ -2486,6 +2914,28 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
break;
}
+ /* Read DCBX configuration */
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ dcbx_mode = ecore_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ OFFSETOF(struct nvm_cfg1_port, generic_cont0));
+ dcbx_mode = (dcbx_mode & NVM_CFG1_PORT_DCBX_MODE_MASK)
+ >> NVM_CFG1_PORT_DCBX_MODE_OFFSET;
+ switch (dcbx_mode) {
+ case NVM_CFG1_PORT_DCBX_MODE_DYNAMIC:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DYNAMIC;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_CEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_CEE;
+ break;
+ case NVM_CFG1_PORT_DCBX_MODE_IEEE:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_IEEE;
+ break;
+ default:
+ p_hwfn->hw_info.dcbx_mode = ECORE_DCBX_VERSION_DISABLED;
+ }
+
/* Read default link configuration */
link = &p_hwfn->mcp_info->link_input;
port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
@@ -2595,7 +3045,13 @@ static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
OSAL_SET_BIT(ECORE_DEV_CAP_IWARP,
&p_hwfn->hw_info.device_capabilities);
- return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ rc = ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
+
+ return rc;
}
static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
@@ -2615,7 +3071,12 @@ static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
* In case of CMT in BB, only the "even" functions are enabled, and thus
* the number of functions for both hwfns is learnt from the same bits.
*/
- reg_function_hide = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+ if (ECORE_IS_BB(p_dev) || ECORE_IS_AH(p_dev)) {
+ reg_function_hide = ecore_rd(p_hwfn, p_ptt,
+ MISCS_REG_FUNCTION_HIDE_BB_K2);
+ } else { /* E5 */
+ reg_function_hide = 0;
+ }
if (reg_function_hide & 0x1) {
if (ECORE_IS_BB(p_dev)) {
@@ -2681,8 +3142,7 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
port_mode = 1;
else
#endif
- port_mode = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NW_PORT_MODE_BB_B0);
+ port_mode = ecore_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
if (port_mode < 3) {
p_hwfn->p_dev->num_ports_in_engines = 1;
@@ -2697,8 +3157,8 @@ static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
}
}
-static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+static void ecore_hw_info_port_num_ah_e5(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
u32 port;
int i;
@@ -2727,7 +3187,8 @@ static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
#endif
for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
port = ecore_rd(p_hwfn, p_ptt,
- CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
+ CNIG_REG_NIG_PORT0_CONF_K2_E5 +
+ (i * 4));
if (port & 1)
p_hwfn->p_dev->num_ports_in_engines++;
}
@@ -2739,20 +3200,27 @@ static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
if (ECORE_IS_BB(p_hwfn->p_dev))
ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
else
- ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
+ ecore_hw_info_port_num_ah_e5(p_hwfn, p_ptt);
}
static enum _ecore_status_t
ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- enum ecore_pci_personality personality, bool drv_resc_alloc)
+ enum ecore_pci_personality personality,
+ struct ecore_hw_prepare_params *p_params)
{
+ bool drv_resc_alloc = p_params->drv_resc_alloc;
enum _ecore_status_t rc;
/* Since all information is common, only first hwfns should do this */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = ecore_iov_hw_info(p_hwfn);
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_BAD_IOV;
+ else
+ return rc;
+ }
}
/* TODO In get_hw_info, amongst others:
@@ -2765,13 +3233,22 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_hw_info_port_num(p_hwfn, p_ptt);
#ifndef ASIC_ONLY
- if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
+ if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) {
+#endif
+ rc = ecore_hw_get_nvm_info(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+#ifndef ASIC_ONLY
+ }
#endif
- ecore_hw_get_nvm_info(p_hwfn, p_ptt);
rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
- if (rc)
- return rc;
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_IGU;
+ else
+ return rc;
+ }
#ifndef ASIC_ONLY
if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
@@ -2795,11 +3272,14 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
}
- if (personality != ECORE_PCI_DEFAULT)
+ if (personality != ECORE_PCI_DEFAULT) {
p_hwfn->hw_info.personality = personality;
- else if (ecore_mcp_is_init(p_hwfn))
- p_hwfn->hw_info.personality =
- p_hwfn->mcp_info->func_info.protocol;
+ } else if (ecore_mcp_is_init(p_hwfn)) {
+ enum ecore_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
#ifndef ASIC_ONLY
/* To overcome ILT lack for emulation, at least until we have
@@ -2826,18 +3306,23 @@ ecore_get_hw_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
ecore_get_num_funcs(p_hwfn, p_ptt);
+ if (ecore_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
/* In case of forcing the driver's default resource allocation, calling
* ecore_hw_get_resc() should come after initializing the personality
* and after getting the number of functions, since the calculation of
* the resources/features depends on them.
* This order is not harmful if not forcing.
*/
- return ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
-}
+ rc = ecore_hw_get_resc(p_hwfn, drv_resc_alloc);
+ if (rc != ECORE_SUCCESS && p_params->b_relaxed_probe) {
+ rc = ECORE_SUCCESS;
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_BAD_MCP;
+ }
-#define ECORE_DEV_ID_MASK 0xff00
-#define ECORE_DEV_ID_MASK_BB 0x1600
-#define ECORE_DEV_ID_MASK_AH 0x8000
+ return rc;
+}
static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
{
@@ -2892,9 +3377,9 @@ static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
MISCS_REG_CHIP_METAL);
MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
DP_INFO(p_dev->hwfns,
- "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
ECORE_IS_BB(p_dev) ? "BB" : "AH",
- CHIP_REV_IS_A0(p_dev) ? 0 : 1,
+ 'A' + p_dev->chip_rev, (int)p_dev->chip_metal,
p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
p_dev->chip_metal);
@@ -2948,11 +3433,13 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev)
#endif
static enum _ecore_status_t
-ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
- void OSAL_IOMEM *p_doorbells,
+ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * p_regview,
+ void OSAL_IOMEM * p_doorbells,
struct ecore_hw_prepare_params *p_params)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
+ struct ecore_mdump_info mdump_info;
enum _ecore_status_t rc = ECORE_SUCCESS;
/* Split PCI bars evenly between hwfns */
@@ -2966,6 +3453,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
"Reading the ME register returns all Fs; Preventing further chip access\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_ME;
return ECORE_INVAL;
}
@@ -2975,6 +3464,8 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
rc = ecore_ptt_pool_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err0;
}
@@ -2984,8 +3475,12 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
/* First hwfn learns basic information, e.g., number of hwfns */
if (!p_hwfn->my_id) {
rc = ecore_get_dev_info(p_dev);
- if (rc != ECORE_SUCCESS)
+ if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_DEV;
goto err1;
+ }
}
ecore_hw_hwfn_prepare(p_hwfn);
@@ -2994,50 +3489,72 @@ ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, void OSAL_IOMEM *p_regview,
rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err1;
}
- if (p_hwfn == ECORE_LEADING_HWFN(p_dev) && !p_dev->recov_in_prog) {
- rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
- if (rc != ECORE_SUCCESS)
- DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
- }
-
/* Read the device configuration information from the HW and SHMEM */
rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt,
- p_params->personality, p_params->drv_resc_alloc);
+ p_params->personality, p_params);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
goto err2;
}
+ /* Sending a mailbox to the MFW should be after ecore_get_hw_info() is
+ * called, since among others it sets the ports number in an engine.
+ */
+ if (p_params->initiate_pf_flr && p_hwfn == ECORE_LEADING_HWFN(p_dev) &&
+ !p_dev->recov_in_prog) {
+ rc = ecore_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false, "Failed to initiate PF FLR\n");
+ }
+
+ /* Check if mdump logs are present and update the epoch value */
+ if (p_hwfn == ECORE_LEADING_HWFN(p_hwfn->p_dev)) {
+ rc = ecore_mcp_mdump_get_info(p_hwfn, p_hwfn->p_main_ptt,
+ &mdump_info);
+ if (rc == ECORE_SUCCESS && mdump_info.num_of_logs > 0) {
+ DP_NOTICE(p_hwfn, false,
+ "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ }
+
+ ecore_mcp_mdump_set_values(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->epoch);
+ }
+
/* Allocate the init RT array and initialize the init-ops engine */
rc = ecore_init_alloc(p_hwfn);
if (rc) {
DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_FAILED_MEM;
goto err2;
}
#ifndef ASIC_ONLY
if (CHIP_REV_IS_FPGA(p_dev)) {
DP_NOTICE(p_hwfn, false,
"FPGA: workaround; Prevent DMAE parities\n");
- ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
+ ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK_K2_E5,
+ 7);
DP_NOTICE(p_hwfn, false,
"FPGA: workaround: Set VF bar0 size\n");
ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
- PGLUE_B_REG_VF_BAR0_SIZE, 4);
+ PGLUE_B_REG_VF_BAR0_SIZE_K2_E5, 4);
}
#endif
return rc;
- err2:
+err2:
if (IS_LEAD_HWFN(p_hwfn))
ecore_iov_free_hw_info(p_dev);
ecore_mcp_free(p_hwfn);
- err1:
+err1:
ecore_hw_hwfn_free(p_hwfn);
- err0:
+err0:
return rc;
}
@@ -3049,6 +3566,9 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
p_dev->chk_reg_fifo = p_params->chk_reg_fifo;
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res = ECORE_HW_PREPARE_SUCCESS;
+
/* Store the precompiled init data ptrs */
if (IS_PF(p_dev))
ecore_init_iro_array(p_dev);
@@ -3084,6 +3604,10 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
* initialized hwfn 0.
*/
if (rc != ECORE_SUCCESS) {
+ if (p_params->b_relaxed_probe)
+ p_params->p_relaxed_res =
+ ECORE_HW_PREPARE_FAILED_ENG2;
+
if (IS_PF(p_dev)) {
ecore_init_free(p_hwfn);
ecore_mcp_free(p_hwfn);
@@ -3096,13 +3620,18 @@ enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev,
}
}
- return ECORE_SUCCESS;
+ return rc;
}
void ecore_hw_remove(struct ecore_dev *p_dev)
{
+ struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
int i;
+ if (IS_PF(p_dev))
+ ecore_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ ECORE_OV_DRIVER_STATE_NOT_LOADED);
+
for_each_hwfn(p_dev, i) {
struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
@@ -3164,13 +3693,13 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
struct ecore_chain *p_chain)
{
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
- u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
+ u8 *p_pbl_virt = (u8 *)p_chain->pbl_sp.p_virt_table;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
if (!pp_virt_addr_tbl)
return;
- if (!p_chain->pbl.p_virt_table)
+ if (!p_pbl_virt)
goto out;
for (i = 0; i < page_cnt; i++) {
@@ -3185,8 +3714,10 @@ static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
}
pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table, pbl_size);
+
+ if (!p_chain->b_external_pbl)
+ OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl_sp.p_virt_table,
+ p_chain->pbl_sp.p_phys_table, pbl_size);
out:
OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
}
@@ -3271,8 +3802,8 @@ ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
static enum _ecore_status_t
ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
{
- void *p_virt = OSAL_NULL;
dma_addr_t p_phys = 0;
+ void *p_virt = OSAL_NULL;
p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
if (!p_virt) {
@@ -3286,8 +3817,10 @@ ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
return ECORE_SUCCESS;
}
-static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
- struct ecore_chain *p_chain)
+static enum _ecore_status_t
+ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
{
void *p_virt = OSAL_NULL;
u8 *p_pbl_virt = OSAL_NULL;
@@ -3296,13 +3829,12 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
u32 page_cnt = p_chain->page_cnt, size, i;
size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
+ pp_virt_addr_tbl = (void **)OSAL_VZALLOC(p_dev, size);
if (!pp_virt_addr_tbl) {
DP_NOTICE(p_dev, true,
"Failed to allocate memory for the chain virtual addresses table\n");
return ECORE_NOMEM;
}
- OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
@@ -3311,7 +3843,15 @@ static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+
+ if (ext_pbl == OSAL_NULL) {
+ p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
+ } else {
+ p_pbl_virt = ext_pbl->p_pbl_virt;
+ p_pbl_phys = ext_pbl->p_pbl_phys;
+ p_chain->b_external_pbl = true;
+ }
+
ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
pp_virt_addr_tbl);
if (!p_pbl_virt) {
@@ -3349,7 +3889,8 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_mode mode,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems, osal_size_t elem_size,
- struct ecore_chain *p_chain)
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl)
{
u32 page_cnt;
enum _ecore_status_t rc = ECORE_SUCCESS;
@@ -3380,7 +3921,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
rc = ecore_chain_alloc_single(p_dev, p_chain);
break;
case ECORE_CHAIN_MODE_PBL:
- rc = ecore_chain_alloc_pbl(p_dev, p_chain);
+ rc = ecore_chain_alloc_pbl(p_dev, p_chain, ext_pbl);
break;
}
if (rc)
@@ -3388,7 +3929,7 @@ enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
- nomem:
+nomem:
ecore_chain_free(p_dev, p_chain);
return rc;
}
@@ -3764,19 +4305,14 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
{
struct coalescing_timeset *p_coal_timeset;
- if (IS_VF(p_hwfn->p_dev)) {
- DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
- return ECORE_INVAL;
- }
-
if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
DP_NOTICE(p_hwfn, true,
"Coalescing configuration not enabled\n");
return ECORE_INVAL;
}
- OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
p_coal_timeset = p_eth_qzone;
+ OSAL_MEMSET(p_eth_qzone, 0, eth_qzone_size);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
@@ -3784,15 +4320,55 @@ static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
+enum _ecore_status_t ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct ecore_ptt *p_ptt;
+
+ /* TODO - Configuring a single queue's coalescing but
+ * claiming all queues are abiding same configuration
+ * for PF and VF both.
+ */
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_set_coalesce(p_hwfn, rx_coal,
+ tx_coal, p_cid);
+
+ p_ptt = ecore_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return ECORE_AGAIN;
+
+ if (rx_coal) {
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ }
+
+ if (tx_coal) {
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+ }
+out:
+ ecore_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
{
struct ustorm_eth_queue_zone eth_qzone;
- u16 fw_qid = 0;
+ u8 timeset, timer_res;
u32 address;
enum _ecore_status_t rc;
- u8 timeset, timer_res;
/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
if (coalesce <= 0x7F) {
@@ -3807,35 +4383,32 @@ enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
}
timeset = (u8)(coalesce >> timer_res);
- rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, false);
if (rc != ECORE_SUCCESS)
goto out;
- address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
if (rc != ECORE_SUCCESS)
goto out;
- p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
out:
return rc;
}
enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id)
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid)
{
struct xstorm_eth_queue_zone eth_qzone;
- u16 fw_qid = 0;
+ u8 timeset, timer_res;
u32 address;
enum _ecore_status_t rc;
- u8 timeset, timer_res;
/* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
if (coalesce <= 0x7F) {
@@ -3851,22 +4424,16 @@ enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
timeset = (u8)(coalesce >> timer_res);
- rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ rc = ecore_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->abs.sb_idx, true);
if (rc != ECORE_SUCCESS)
goto out;
- address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
- if (rc != ECORE_SUCCESS)
- goto out;
-
- p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
out:
return rc;
}
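
For reference, both functions above split the coalesce value into a timer resolution and a 7-bit timeset before programming the queue zone; the sketch below shows that arithmetic as a standalone helper, assuming the intermediate thresholds follow the 0-0x7f / 0x80-0xff / 0x100-0x1ff ranges noted in the API comment (illustrative only, not part of the patch):

	/* Illustrative helper: derive timer_res and the 7-bit timeset from a
	 * coalesce value in usec. For example, coalesce = 300 (0x12C) yields
	 * timer_res = 2 and timeset = 75, i.e. an effective 75 << 2 = 300 usec.
	 */
	static int split_coalesce(unsigned int coalesce,
				  unsigned char *timer_res,
				  unsigned char *timeset)
	{
		if (coalesce <= 0x7F)
			*timer_res = 0;
		else if (coalesce <= 0xFF)
			*timer_res = 1;
		else if (coalesce <= 0x1FF)
			*timer_res = 2;
		else
			return -1;	/* values above 511 are rejected */

		*timeset = (unsigned char)(coalesce >> *timer_res);
		return 0;
	}
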
@@ -4292,3 +4859,16 @@ int ecore_device_num_ports(struct ecore_dev *p_dev)
return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
}
+
+void ecore_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid,
+ __le16 *fw_lsb,
+ u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
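
The helper above packs a 6-byte MAC address into three 16-bit words, swapping the bytes within each word; a small hypothetical, standalone check of the resulting layout (not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative only: show how aa:bb:cc:dd:ee:ff is laid out by the
	 * ecore_set_fw_mac_addr() byte swizzle above.
	 */
	int main(void)
	{
		uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
		uint16_t msb, mid, lsb;

		((uint8_t *)&msb)[0] = mac[1];
		((uint8_t *)&msb)[1] = mac[0];
		((uint8_t *)&mid)[0] = mac[3];
		((uint8_t *)&mid)[1] = mac[2];
		((uint8_t *)&lsb)[0] = mac[5];
		((uint8_t *)&lsb)[1] = mac[4];

		/* On a little-endian host this prints: aabb ccdd eeff */
		printf("%04x %04x %04x\n", msb, mid, lsb);
		return 0;
	}
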
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 042c0af2..e64a768d 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -58,18 +58,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
void ecore_resc_setup(struct ecore_dev *p_dev);
struct ecore_hw_init_params {
- /* tunnelling parameters */
- struct ecore_tunn_start_params *p_tunn;
+ /* Tunnelling parameters */
+ struct ecore_tunnel_info *p_tunn;
+
bool b_hw_start;
- /* interrupt mode [msix, inta, etc.] to use */
+
+ /* Interrupt mode [msix, inta, etc.] to use */
enum ecore_int_mode int_mode;
-/* npar tx switching to be used for vports configured for tx-switching */
+ /* NPAR tx switching to be used for vports configured for tx-switching
+ */
bool allow_npar_tx_switch;
- /* binary fw data pointer in binary fw file */
+
+ /* Binary fw data pointer in binary fw file */
const u8 *bin_fw_data;
- /* the OS Epoch time in seconds */
- u32 epoch;
+
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
};
/**
@@ -131,22 +151,47 @@ void ecore_prepare_hibernate(struct ecore_dev *p_dev);
*/
void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn);
-/**
- * @brief ecore_hw_reset -
- *
- * @param p_dev
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev);
+enum ecore_hw_prepare_result {
+ ECORE_HW_PREPARE_SUCCESS,
+
+ /* FAILED results indicate probe has failed & cleaned up */
+ ECORE_HW_PREPARE_FAILED_ENG2,
+ ECORE_HW_PREPARE_FAILED_ME,
+ ECORE_HW_PREPARE_FAILED_MEM,
+ ECORE_HW_PREPARE_FAILED_DEV,
+ ECORE_HW_PREPARE_FAILED_NVM,
+
+ /* BAD results indicate probe has passed even though something went
+ * wrong; trying to actually use it [i.e., hw_init()] might have
+ * dire repercussions.
+ */
+ ECORE_HW_PREPARE_BAD_IOV,
+ ECORE_HW_PREPARE_BAD_MCP,
+ ECORE_HW_PREPARE_BAD_IGU,
+};
struct ecore_hw_prepare_params {
- /* personality to initialize */
+ /* Personality to initialize */
int personality;
- /* force the driver's default resource allocation */
+
+ /* Force the driver's default resource allocation */
bool drv_resc_alloc;
- /* check the reg_fifo after any register access */
+
+ /* Check the reg_fifo after any register access */
bool chk_reg_fifo;
+
+ /* Request the MFW to initiate PF FLR */
+ bool initiate_pf_flr;
+
+ /* The OS Epoch time in seconds */
+ u32 epoch;
+
+ /* Allow prepare to pass even if some initializations are failing.
+ * If set, the `p_relaxed_res' field will be set with the result,
+ * and might allow probe to pass even if there are certain issues.
+ */
+ bool b_relaxed_probe;
+ enum ecore_hw_prepare_result p_relaxed_res;
};
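
Going by the fields above, a caller that opts into relaxed probe would inspect p_relaxed_res once ecore_hw_prepare() returns; a hypothetical fragment (os_epoch_seconds and the parameter values are illustrative):

	struct ecore_hw_prepare_params params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&params, sizeof(params));
	params.personality = ECORE_PCI_DEFAULT;
	params.initiate_pf_flr = true;
	params.epoch = os_epoch_seconds;	/* assumed OS-provided value */
	params.b_relaxed_probe = true;

	rc = ecore_hw_prepare(p_dev, &params);
	if (rc != ECORE_SUCCESS)
		return rc;	/* FAILED_* results: probe already cleaned up */

	if (params.p_relaxed_res != ECORE_HW_PREPARE_SUCCESS) {
		/* BAD_* results: probe passed, but hw_init() may misbehave;
		 * decide here whether to continue or bail out.
		 */
	}
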
/**
@@ -368,7 +413,8 @@ ecore_chain_alloc(struct ecore_dev *p_dev,
enum ecore_chain_cnt_type cnt_type,
u32 num_elems,
osal_size_t elem_size,
- struct ecore_chain *p_chain);
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
/**
* @brief ecore_chain_free - Free chain DMA memory
@@ -515,41 +561,24 @@ enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 id,
bool is_vf);
-
/**
- * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
- * The fact that we can configure coalescing to up to 511, but on varying
- * accuracy [the bigger the value the less accurate] up to a mistake of 3usec
- * for the highest values.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
- *
- * @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
-
-/**
- * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue
- * While the API allows setting coalescing per-qid, all tx queues sharing a
- * SB should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * @brief ecore_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * Tx queue. Coalescing can be configured up to 511, but with varying
+ * accuracy [the bigger the value the less accurate], up to an error of
+ * 3usec for the highest values.
+ * While the API allows setting coalescing per-qid, all queues sharing a SB
+ * should be in the same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
+ * otherwise configuration would break.
*
* @param p_hwfn
- * @param p_ptt
- * @param coalesce - Coalesce value in micro seconds.
- * @param qid - Queue index.
- * @param qid - SB Id
+ * @param rx_coal - Rx Coalesce value in micro seconds.
+ * @param tx_coal - TX Coalesce value in micro seconds.
+ * @param p_handle
*
* @return enum _ecore_status_t
- */
-enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 coalesce, u8 qid, u16 sb_id);
+ **/
+enum _ecore_status_t
+ecore_set_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal,
+ u16 tx_coal, void *p_handle);
#endif
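
Per the new prototype above, both directions of a single queue are configured in one call; a hypothetical usage sketch (p_rxq_handle is the opaque queue handle obtained at queue-start time, and a value of 0 leaves that direction unchanged):

	enum _ecore_status_t rc;

	/* Hypothetical: 48 usec Rx / 96 usec Tx coalescing; both values stay
	 * in the same 0-0x7f range, as required for queues sharing a SB.
	 */
	rc = ecore_set_queue_coalesce(p_hwfn, 48, 96, p_rxq_handle);
	if (rc != ECORE_SUCCESS)
		return rc;
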
diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h
index 6395b7cd..2acd864d 100644
--- a/drivers/net/qede/base/ecore_gtt_reg_addr.h
+++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h
@@ -10,43 +10,43 @@
#define GTT_REG_ADDR_H
/* Win 2 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
/* Win 3 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
/* Win 4 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
/* Win 5 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
/* Win 6 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL
/* Win 7 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL
/* Win 8 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL
/* Win 9 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL
/* Win 10 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL
/* Win 11 */
-/* Access:RW DataWidth:0x20 Chips: BB_A0 BB_B0 K2 */
+/* Access:RW DataWidth:0x20 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL
#endif
diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h
index 179d410f..3042ed55 100644
--- a/drivers/net/qede/base/ecore_hsi_common.h
+++ b/drivers/net/qede/base/ecore_hsi_common.h
@@ -75,306 +75,306 @@ struct xstorm_core_conn_st_ctx {
__le32 reserved0[55] /* Pad to 15 cycles */;
};
-struct xstorm_core_conn_ag_ctx {
+struct e4_xstorm_core_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 core_state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
/* cf_array_cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
/* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
/* cf_array_cf_en */
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
/* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
/* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
/* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
/* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
/* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
/* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
/* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
/* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2 /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
__le16 consolid_prod /* physical_q1 */;
@@ -410,7 +410,7 @@ struct xstorm_core_conn_ag_ctx {
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
+ u8 e5_reserved /* e5_reserved */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
@@ -428,89 +428,89 @@ struct xstorm_core_conn_ag_ctx {
__le16 word15 /* word15 */;
};
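All of the MASK/SHIFT pairs above (only their E4_ prefix changes in this patch) follow one convention: a field's width is given by *_MASK and its bit position within the enclosing flags byte by *_SHIFT. A minimal, self-contained sketch of the usual clear-then-insert access, reusing the FLUSH_Q0 values from the hunk above (the helper name and the standalone program are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK	0x3
#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT	0

static uint8_t set_flush_q0(uint8_t flags7, uint8_t val)
{
	/* Clear the 2-bit field, then insert the new value. */
	flags7 &= ~(E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK <<
		    E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT);
	flags7 |= (val & E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK) <<
		  E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT;
	return flags7;
}

int main(void)
{
	uint8_t flags7 = 0;

	flags7 = set_flush_q0(flags7, 2);
	printf("flags7 = 0x%02x\n", flags7);
	return 0;
}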
-struct tstorm_core_conn_ag_ctx {
+struct e4_tstorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
@@ -532,63 +532,63 @@ struct tstorm_core_conn_ag_ctx {
__le32 reg10 /* reg10 */;
};
-struct ustorm_core_conn_ag_ctx {
+struct e4_ustorm_core_conn_ag_ctx {
u8 reserved /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
@@ -628,11 +628,11 @@ struct core_conn_context {
/* xstorm storm context */
struct xstorm_core_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
/* tstorm aggregative context */
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
/* mstorm storm context */
struct mstorm_core_conn_st_ctx mstorm_st_context;
/* ustorm storm context */
@@ -660,6 +660,7 @@ enum core_event_opcode {
CORE_EVENT_TX_QUEUE_STOP,
CORE_EVENT_RX_QUEUE_START,
CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
MAX_CORE_EVENT_OPCODE
};
@@ -743,6 +744,7 @@ enum core_ramrod_cmd_id {
CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
+ CORE_RAMROD_RX_QUEUE_FLUSH /* RX Flush queue Ramrod */,
MAX_CORE_RAMROD_CMD_ID
};
@@ -834,7 +836,12 @@ struct core_rx_fast_path_cqe {
__le16 packet_length /* Total packet length (from the parser) */;
__le16 vlan /* 802.1q VLAN tag */;
struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
- __le32 reserved[4];
+/* bit-map: each bit represents a specific error. Error indications are
+ * provided by the cracker. See spec for detailed description.
+ */
+ struct parsing_err_flags err_flags;
+ __le16 reserved0;
+ __le32 reserved1[3];
};
/*
@@ -860,7 +867,8 @@ struct core_rx_slow_path_cqe {
u8 type /* CQE type */;
u8 ramrod_cmd_id;
__le16 echo;
- __le32 reserved1[7];
+ struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */;
+ __le32 reserved1[5];
};
/*
@@ -926,36 +934,51 @@ struct core_rx_stop_ramrod_data {
/*
* Flags for Core TX BD
*/
-struct core_tx_bd_flags {
- u8 as_bitfield;
+struct core_tx_bd_data {
+ __le16 as_bitfield;
/* Do not allow additional VLAN manipulations on this packet (DCB) */
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
/* Insert VLAN into packet */
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1
-#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
/* This is the first BD of the packet (for debug) */
-#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1
-#define CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
/* Calculate the IP checksum for the packet */
-#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
/* Calculate the L4 checksum for the packet */
-#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
/* Packet is IPv6 with extensions */
-#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
-#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
/* If IPv6+ext, and if l4_csum is 1, than this field indicates L4 protocol:
* 0-TCP, 1-UDP
*/
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
/* The pseudo checksum mode to place in the L4 checksum field. Required only
- * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
+ * when IPv6+ext and l4_csum is set. (use enum core_l4_pseudo_checksum_mode)
*/
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
-#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+/* Number of BDs that make up one packet - width wide enough to present
+ * CORE_LL2_TX_MAX_BDS_PER_PACKET
+ */
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+/* Use roce_flavor enum - differentiating between RoCE flavors is valid only
+ * when connType is ROCE (use enum core_roce_flavor_type)
+ */
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+/* Calculate ip length */
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x3
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 14
};
/*
@@ -968,28 +991,18 @@ struct core_tx_bd {
* packets: echo data to pass to Rx
*/
__le16 nw_vlan_or_lb_echo;
- u8 bitfield0;
-/* Number of BDs that make up one packet - width wide enough to present
- * X_CORE_LL2_NUM_OF_BDS_ON_ST_CT
- */
-#define CORE_TX_BD_NBDS_MASK 0xF
-#define CORE_TX_BD_NBDS_SHIFT 0
-/* Use roce_flavor enum - Diffrentiate between Roce flavors is valid when
- * connType is ROCE (use enum core_roce_flavor_type)
- */
-#define CORE_TX_BD_ROCE_FLAV_MASK 0x1
-#define CORE_TX_BD_ROCE_FLAV_SHIFT 4
-#define CORE_TX_BD_RESERVED0_MASK 0x7
-#define CORE_TX_BD_RESERVED0_SHIFT 5
- struct core_tx_bd_flags bd_flags /* BD Flags */;
+ struct core_tx_bd_data bd_data /* BD Flags */;
__le16 bitfield1;
+/* L4 Header Offset from start of packet (in Words). This is needed if both
+ * l4_csum and ipv6_ext are set
+ */
#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
/* Packet destination - Network, LB (use enum core_tx_dest) */
#define CORE_TX_BD_TX_DST_MASK 0x1
#define CORE_TX_BD_TX_DST_SHIFT 14
-#define CORE_TX_BD_RESERVED1_MASK 0x1
-#define CORE_TX_BD_RESERVED1_SHIFT 15
+#define CORE_TX_BD_RESERVED_MASK 0x1
+#define CORE_TX_BD_RESERVED_SHIFT 15
};
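The 8-bit core_tx_bd_flags byte is replaced here by the 16-bit core_tx_bd_data bitfield, which also absorbs the BD count (NBDS) and RoCE flavor bits that previously lived in the BD's bitfield0. A hedged sketch of assembling such a word, reusing two of the new macro values from the hunk above (the helper is illustrative and omits the __le16 conversion a driver would perform):

#include <stdint.h>

#define CORE_TX_BD_DATA_L4_CSUM_MASK	0x1
#define CORE_TX_BD_DATA_L4_CSUM_SHIFT	4
#define CORE_TX_BD_DATA_NBDS_MASK	0xF
#define CORE_TX_BD_DATA_NBDS_SHIFT	8

/* Build bd_data for a packet spanning nbds BDs, optionally requesting
 * L4 checksum offload. Host byte order only; endianness is ignored.
 */
uint16_t build_bd_data_sketch(unsigned int nbds, int l4_csum)
{
	uint16_t v = 0;

	v |= ((uint16_t)(l4_csum ? 1 : 0) & CORE_TX_BD_DATA_L4_CSUM_MASK) <<
	     CORE_TX_BD_DATA_L4_CSUM_SHIFT;
	v |= ((uint16_t)nbds & CORE_TX_BD_DATA_NBDS_MASK) <<
	     CORE_TX_BD_DATA_NBDS_SHIFT;
	return v;
}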
@@ -1034,13 +1047,13 @@ struct core_tx_stop_ramrod_data {
/*
* Enum flag for what type of dcb data to update
*/
-enum dcb_dhcp_update_flag {
+enum dcb_dscp_update_mode {
/* use when no change should be done to dcb data */
- DONT_UPDATE_DCB_DHCP,
+ DONT_UPDATE_DCB_DSCP,
UPDATE_DCB /* use to update only l2 (vlan) priority */,
- UPDATE_DSCP /* use to update only l3 dhcp */,
- UPDATE_DCB_DSCP /* update vlan pri and dhcp */,
- MAX_DCB_DHCP_UPDATE_FLAG
+ UPDATE_DSCP /* use to update only l3 dscp */,
+ UPDATE_DCB_DSCP /* update vlan pri and dscp */,
+ MAX_DCB_DSCP_UPDATE_FLAG
};
@@ -1224,6 +1237,10 @@ enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
/* LL2 queue for unaligned packets sent aligned by the driver */
IWARP_LL2_ALIGNED_TX_QUEUE,
+/* LL2 queue for unaligned packets that were aligned and right-trimmed by the
+ * driver
+ */
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
IWARP_LL2_ERROR /* Error indication */,
MAX_IWARP_LL2_TX_QUEUES
};
@@ -1265,6 +1282,7 @@ enum malicious_vf_error_id {
/* Tunneled packet with IPv6+Ext without a proper number of BDs */
ETH_TUNN_IPV6_EXT_NBD_ERR,
ETH_CONTROL_PACKET_VIOLATION /* VF sent control frame such as PFC */,
+ ETH_ANTI_SPOOFING_ERR /* Anti-Spoofing verification failure */,
MAX_MALICIOUS_VF_ERROR_ID
};
@@ -1311,9 +1329,13 @@ enum personality_type {
* tunnel configuration
*/
struct pf_start_tunnel_config {
-/* Set VXLAN tunnel UDP destination port. */
+/* Set VXLAN tunnel UDP destination port to vxlan_udp_port. If not set -
+ * FW will use a default port
+ */
u8 set_vxlan_udp_port_flg;
-/* Set GENEVE tunnel UDP destination port. */
+/* Set GENEVE tunnel UDP destination port to geneve_udp_port. If not set -
+ * FW will use a default port
+ */
u8 set_geneve_udp_port_flg;
u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
/* If set, enable l2 GENEVE tunnel in TX path. */
@@ -1329,8 +1351,10 @@ struct pf_start_tunnel_config {
u8 tunnel_clss_ipgeneve;
u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */;
u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */;
- __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
- __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+/* VXLAN tunnel UDP destination port. Valid if set_vxlan_udp_port_flg=1 */
+ __le16 vxlan_udp_port;
+/* GENEVE tunnel UDP destination port. Valid if set_geneve_udp_port_flg=1 */
+ __le16 geneve_udp_port;
};
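The reworded comments make the contract explicit: vxlan_udp_port and geneve_udp_port are consumed by the FW only when the matching set_*_udp_port_flg is 1; otherwise the FW keeps its default port. A small sketch of that flag-plus-value convention, using an illustrative stand-in struct rather than the real ramrod data:

#include <stdint.h>

struct tunnel_port_sketch {
	uint8_t  set_vxlan_udp_port_flg;
	uint16_t vxlan_udp_port;
};

/* Request a non-default VXLAN UDP destination port, or leave the flag
 * clear so the FW default stays in effect (port == 0 here means
 * "do not override"). Endianness handling is omitted.
 */
void request_vxlan_port(struct tunnel_port_sketch *cfg, uint16_t port)
{
	cfg->set_vxlan_udp_port_flg = port ? 1 : 0;
	cfg->vxlan_udp_port = port;
}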
/*
@@ -1431,13 +1455,13 @@ struct pf_update_tunnel_config {
*/
struct pf_update_ramrod_data {
u8 pf_id;
- u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */;
- u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */;
- u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */;
- u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */;
+ u8 update_eth_dcb_data_mode /* Update Eth DCB data indication */;
+ u8 update_fcoe_dcb_data_mode /* Update FCOE DCB data indication */;
+ u8 update_iscsi_dcb_data_mode /* Update iSCSI DCB data indication */;
+ u8 update_roce_dcb_data_mode /* Update ROCE DCB data indication */;
/* Update RROCE (RoceV2) DCB data indication */
- u8 update_rroce_dcb_data_flag;
- u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode /* Update IWARP DCB data indication */;
u8 update_mf_vlan_flag /* Update MF outer vlan Id */;
struct protocol_dcb_data eth_dcb_data /* core eth related fields */;
struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */;
@@ -1596,6 +1620,8 @@ struct tstorm_per_port_stat {
struct regpair fcoe_irregular_pkt;
/* packet is an ROCE irregular packet */
struct regpair roce_irregular_pkt;
+/* packet is an IWARP irregular packet */
+ struct regpair iwarp_irregular_pkt;
/* packet is an ETH irregular packet */
struct regpair eth_irregular_pkt;
/* packet is an TOE irregular packet */
@@ -1846,8 +1872,11 @@ struct dmae_cmd {
#define DMAE_CMD_SRC_VF_ID_SHIFT 0
#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
-/* PCIe completion address high or reserved (if completion address is in GRC) */
+/* PCIe completion address low in bytes or GRC completion address in DW */
+ __le32 comp_addr_lo;
+/* PCIe completion address high in bytes or reserved (if completion address is
+ * GRC)
+ */
__le32 comp_addr_hi;
__le32 comp_val /* Value to write to completion address */;
__le32 crc32 /* crc16 result */;
@@ -1919,6 +1948,92 @@ enum dmae_cmd_src_enum {
};
+struct e4_mstorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_ystorm_core_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2 /* byte2 */;
+ u8 byte3 /* byte3 */;
+ __le16 word0 /* word0 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le16 word1 /* word1 */;
+ __le16 word2 /* word2 */;
+ __le16 word3 /* word3 */;
+ __le16 word4 /* word4 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+};
+
+
/*
* IGU cleanup command
*/
@@ -2002,44 +2117,6 @@ struct igu_msix_vector {
};
-struct mstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-
/*
* per encapsulation type enabling flags
*/
@@ -2187,10 +2264,6 @@ struct sdm_op_gen {
#define SDM_OP_GEN_RESERVED_SHIFT 20
};
-
-
-
-
struct ystorm_core_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
diff --git a/drivers/net/qede/base/ecore_hsi_debug_tools.h b/drivers/net/qede/base/ecore_hsi_debug_tools.h
index e82b0d4c..917e8f4c 100644
--- a/drivers/net/qede/base/ecore_hsi_debug_tools.h
+++ b/drivers/net/qede/base/ecore_hsi_debug_tools.h
@@ -92,6 +92,13 @@ enum block_addr {
GRCBASE_MS = 0x6a0000,
GRCBASE_PHY_PCIE = 0x620000,
GRCBASE_LED = 0x6b8000,
+ GRCBASE_AVS_WRAP = 0x6b0000,
+ GRCBASE_RGFS = 0x1fa0000,
+ GRCBASE_RGSRC = 0x1fa8000,
+ GRCBASE_TGFS = 0x1fb0000,
+ GRCBASE_TGSRC = 0x1fb8000,
+ GRCBASE_PTLD = 0x1fc0000,
+ GRCBASE_YPLD = 0x1fe0000,
GRCBASE_MISC_AEU = 0x8000,
GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
@@ -177,6 +184,13 @@ enum block_id {
BLOCK_MS,
BLOCK_PHY_PCIE,
BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_RGFS,
+ BLOCK_RGSRC,
+ BLOCK_TGFS,
+ BLOCK_TGSRC,
+ BLOCK_PTLD,
+ BLOCK_YPLD,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
@@ -198,6 +212,10 @@ enum bin_dbg_buffer_type {
BIN_BUF_DBG_ATTN_REGS /* Attention registers */,
BIN_BUF_DBG_ATTN_INDEXES /* Attention indexes */,
BIN_BUF_DBG_ATTN_NAME_OFFSETS /* Attention name offsets */,
+ BIN_BUF_DBG_BUS_BLOCKS /* Debug Bus blocks */,
+ BIN_BUF_DBG_BUS_LINES /* Debug Bus lines */,
+ BIN_BUF_DBG_BUS_BLOCKS_USER_DATA /* Debug Bus blocks user data */,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS /* Debug Bus line name offsets */,
BIN_BUF_DBG_PARSING_STRINGS /* Debug Tools parsing strings */,
MAX_BIN_DBG_BUFFER_TYPE
};
@@ -209,8 +227,8 @@ enum bin_dbg_buffer_type {
struct dbg_attn_bit_mapping {
__le16 data;
/* The index of an attention in the blocks attentions list
- * (if is_unused_idx_cnt=0), or a number of consecutive unused attention bits
- * (if is_unused_idx_cnt=1)
+ * (if is_unused_bit_cnt=0), or a number of consecutive unused attention bits
+ * (if is_unused_bit_cnt=1)
*/
#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
@@ -259,10 +277,10 @@ struct dbg_attn_reg_result {
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
/* Number of attention indexes in this register */
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
-/* Offset of this registers block attention indexes (values in the range
- * 0..number of block attentions)
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+/* The offset of this registers attentions within the blocks attentions
+ * list (a value in the range 0..number of block attentions-1)
*/
__le16 attn_idx_offset;
__le16 reserved;
@@ -279,7 +297,7 @@ struct dbg_attn_block_result {
/* Value from dbg_attn_type enum */
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-/* Number of registers in the blok in which at least one attention bit is set */
+/* Number of registers in block in which at least one attention bit is set */
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
/* Offset of this registers block attention names in the attention name offsets
@@ -314,17 +332,17 @@ struct dbg_mode_hdr {
*/
struct dbg_attn_reg {
struct dbg_mode_hdr mode /* Mode header */;
-/* Offset of this registers block attention indexes (values in the range
- * 0..number of block attentions)
+/* The offset of this registers attentions within the blocks attentions
+ * list (a value in the range 0..number of block attentions-1)
*/
__le16 attn_idx_offset;
__le32 data;
/* STS attention register GRC address (in dwords) */
#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-/* Number of attention indexes in this register */
-#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
-#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+/* Number of attentions in this register */
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
/* STS_CLR attention register GRC address (in dwords) */
__le32 sts_clr_address;
/* MASK attention register GRC address (in dwords) */
@@ -344,6 +362,53 @@ enum dbg_attn_type {
/*
+ * Debug Bus block data
+ */
+struct dbg_bus_block {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the Debug Bus lines array. */
+ __le16 lines_offset;
+};
+
+
+/*
+ * Debug Bus block user data
+ */
+struct dbg_bus_block_user_data {
+/* Number of debug lines in this block (excluding signature & latency events) */
+ u8 num_of_lines;
+/* Indicates if this block has a latency events debug line (0/1). */
+ u8 has_latency_events;
+/* Offset of this blocks lines in the debug bus line name offsets array. */
+ __le16 names_offset;
+};
+
+
+/*
+ * Block Debug line data
+ */
+struct dbg_bus_line {
+ u8 data;
+/* Number of groups in the line (0-3) */
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+/* Indicates if this is a 128b line (0) or a 256b line (1). */
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+/* Four 2-bit values, indicating the size of each group minus 1 (i.e.
+ * value=0 means size=1, value=1 means size=2, etc), starting from lsb.
+ * The sizes are in dwords (if is_256b=0) or in qwords (if is_256b=1).
+ */
+ u8 group_sizes;
+};
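The group_sizes byte introduced above packs four 2-bit fields, each storing (group size - 1); the unit is dwords for 128-bit lines and qwords for 256-bit lines, as the comment states. A standalone decoding sketch (not taken from the driver):

#include <stdint.h>
#include <stdio.h>

static void print_group_sizes(uint8_t group_sizes, unsigned int num_groups,
			      int is_256b)
{
	unsigned int i;

	/* Fields are packed starting from the lsb, 2 bits per group. */
	for (i = 0; i < num_groups && i < 4; i++) {
		unsigned int size = ((group_sizes >> (2 * i)) & 0x3) + 1;

		printf("group %u: %u %s\n", i, size,
		       is_256b ? "qwords" : "dwords");
	}
}

int main(void)
{
	/* Example: three groups of sizes 1, 2 and 4 dwords -> 0x34. */
	print_group_sizes(0x34, 3, 0);
	return 0;
}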
+
+
+/*
* condition header for registers dump
*/
struct dbg_dump_cond_hdr {
@@ -367,8 +432,11 @@ struct dbg_dump_mem {
/* register size (in dwords) */
#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_RESERVED_MASK 0xFF
-#define DBG_DUMP_MEM_RESERVED_SHIFT 24
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
};
@@ -378,10 +446,13 @@ struct dbg_dump_mem {
struct dbg_dump_reg {
__le32 data;
/* register address (in dwords) */
-#define DBG_DUMP_REG_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF /* register address (in dwords) */
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF /* register size (in dwords) */
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
};
@@ -414,8 +485,11 @@ struct dbg_idle_chk_cond_hdr {
struct dbg_idle_chk_cond_reg {
__le32 data;
/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
/* value from block_id enum */
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
@@ -431,8 +505,11 @@ struct dbg_idle_chk_cond_reg {
struct dbg_idle_chk_info_reg {
__le32 data;
/* Register GRC address (in dwords) */
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0xFFFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+/* indicates if the register is wide-bus */
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
/* value from block_id enum */
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
@@ -534,17 +611,21 @@ enum dbg_idle_chk_severity_types {
* Debug Bus block data
*/
struct dbg_bus_block_data {
-/* Indicates if the block is enabled for recording (0/1) */
- u8 enabled;
- u8 hw_id /* HW ID associated with the block */;
+ __le16 data;
+/* 4-bit value: bit i set -> dword/qword i is enabled. */
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_ENABLE_MASK_SHIFT 0
+/* Number of dwords/qwords to shift right the debug data (0-3) */
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_RIGHT_SHIFT_SHIFT 4
+/* 4-bit value: bit i set -> dword/qword i is forced valid. */
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_VALID_MASK_SHIFT 8
+/* 4-bit value: bit i set -> dword/qword i frame bit is forced. */
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_MASK 0xF
+#define DBG_BUS_BLOCK_DATA_FORCE_FRAME_MASK_SHIFT 12
u8 line_num /* Debug line number to select */;
- u8 right_shift /* Number of units to right the debug data (0-3) */;
- u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */;
-/* 4-bit value: bit i set -> unit i is forced valid. */
- u8 force_valid;
-/* 4-bit value: bit i set -> unit i frame bit is forced. */
- u8 force_frame;
- u8 reserved;
+ u8 hw_id /* HW ID associated with the block */;
};
@@ -594,6 +675,21 @@ enum dbg_bus_constraint_ops {
/*
+ * Debug Bus trigger state data
+ */
+struct dbg_bus_trigger_state_data {
+ u8 data;
+/* 4-bit value: bit i set -> dword i of the trigger state block
+ * (after right shift) is enabled.
+ */
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_BLOCK_SHIFTED_ENABLE_MASK_SHIFT 0
+/* 4-bit value: bit i set -> dword i is compared by a constraint */
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_MASK 0xF
+#define DBG_BUS_TRIGGER_STATE_DATA_CONSTRAINT_DWORD_MASK_SHIFT 4
+};
+
+/*
* Debug Bus memory address
*/
struct dbg_bus_mem_addr {
@@ -640,14 +736,8 @@ union dbg_bus_storm_eid_params {
* Debug Bus Storm data
*/
struct dbg_bus_storm_data {
-/* Indicates if the Storm is enabled for fast debug recording (0/1) */
- u8 fast_enabled;
-/* Fast debug Storm mode, valid only if fast_enabled is set */
- u8 fast_mode;
-/* Indicates if the Storm is enabled for slow debug recording (0/1) */
- u8 slow_enabled;
-/* Slow debug Storm mode, valid only if slow_enabled is set */
- u8 slow_mode;
+ u8 enabled /* indicates if the Storm is enabled for recording */;
+ u8 mode /* Storm debug mode, valid only if the Storm is enabled */;
u8 hw_id /* HW ID associated with the Storm */;
u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */;
/* 1 = EID range filter, 0 = EID mask filter. Valid only if eid_filter_en is
@@ -657,7 +747,6 @@ struct dbg_bus_storm_data {
u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */;
/* EID filter params to filter on. Valid only if eid_filter_en is set. */
union dbg_bus_storm_eid_params eid_filter_params;
- __le16 reserved;
/* CID to filter on. Valid only if cid_filter_en is set. */
__le32 cid;
};
@@ -669,20 +758,18 @@ struct dbg_bus_data {
__le32 app_version /* The tools version number of the application */;
u8 state /* The current debug bus state */;
u8 hw_dwords /* HW dwords per cycle */;
- u8 next_hw_id /* Next HW ID to be associated with an input */;
+/* The HW IDs of the recorded HW blocks, where bits i*3..i*3+2 contain the
+ * HW ID of dword/qword i
+ */
+ __le16 hw_id_mask;
u8 num_enabled_blocks /* Number of blocks enabled for recording */;
u8 num_enabled_storms /* Number of Storms enabled for recording */;
u8 target /* Output target */;
- u8 next_trigger_state /* ID of next trigger state to be added */;
-/* ID of next filter/trigger constraint to be added */
- u8 next_constraint_id;
u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */;
u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */;
/* Indicates if timestamp recording is enabled (0/1) */
u8 timestamp_input_en;
u8 filter_en /* Indicates if the recording filter is enabled (0/1) */;
-/* Indicates if the recording trigger is enabled (0/1) */
- u8 trigger_en;
/* If true, the next added constraint belong to the filter. Otherwise,
* it belongs to the last added trigger state. Valid only if either filter or
* triggers are enabled.
@@ -696,6 +783,14 @@ struct dbg_bus_data {
* Valid only if both filter and trigger are enabled (0/1)
*/
u8 filter_post_trigger;
+ __le16 reserved;
+/* Indicates if the recording trigger is enabled (0/1) */
+ u8 trigger_en;
+/* trigger states data */
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 next_trigger_state /* ID of next trigger state to be added */;
+/* ID of next filter/trigger constraint to be added */
+ u8 next_constraint_id;
/* If true, all inputs are associated with HW ID 0. Otherwise, each input is
* assigned a different HW ID (0/1)
*/
@@ -706,9 +801,8 @@ struct dbg_bus_data {
* DBG_BUS_TARGET_ID_PCI.
*/
struct dbg_bus_pci_buf_data pci_buf;
- __le16 reserved;
/* Debug Bus data for each block */
- struct dbg_bus_block_data blocks[80];
+ struct dbg_bus_block_data blocks[88];
/* Debug Bus data for each block */
struct dbg_bus_storm_data storms[6];
};
@@ -738,17 +832,6 @@ enum dbg_bus_frame_modes {
/*
- * Debug bus input types
- */
-enum dbg_bus_input_types {
- DBG_BUS_INPUT_TYPE_STORM,
- DBG_BUS_INPUT_TYPE_BLOCK,
- MAX_DBG_BUS_INPUT_TYPES
-};
-
-
-
-/*
* Debug bus other engine mode
*/
enum dbg_bus_other_engine_modes {
@@ -842,16 +925,17 @@ enum dbg_bus_targets {
};
+
/*
* GRC Dump data
*/
struct dbg_grc_data {
+/* Indicates if the GRC parameters were initialized */
+ u8 params_initialized;
+ u8 reserved1;
+ __le16 reserved2;
/* Value of each GRC parameter. Array size must match enum dbg_grc_params. */
- __le32 param_val[40];
-/* Indicates for each GRC parameter if it was set by the user (0/1).
- * Array size must match the enum dbg_grc_params.
- */
- u8 param_set_by_user[40];
+ __le32 param_val[48];
};
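The GRC dump data drops the per-parameter param_set_by_user array in favour of a single params_initialized flag, while param_val grows to 48 entries and stays indexed by enum dbg_grc_params (the comment requires the array size to match that enum). An illustrative sketch of that indexing, with stand-in names rather than the real enum:

#include <stdint.h>

enum grc_param_sketch {
	GRC_PARAM_DUMP_PHY_SKETCH = 0,
	GRC_PARAM_NO_MCP_SKETCH,
	MAX_GRC_PARAM_SKETCH
};

struct grc_data_sketch {
	uint8_t  params_initialized;
	uint8_t  reserved1;
	uint16_t reserved2;
	uint32_t param_val[MAX_GRC_PARAM_SKETCH];
};

/* Each GRC parameter value is fetched by indexing param_val with its
 * enum value; 0 is returned here if the parameters were never set up.
 */
uint32_t get_grc_param_sketch(const struct grc_data_sketch *d,
			      enum grc_param_sketch p)
{
	return d->params_initialized ? d->param_val[p] : 0;
}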
@@ -901,6 +985,8 @@ enum dbg_grc_params {
DBG_GRC_PARAM_PARITY_SAFE,
DBG_GRC_PARAM_DUMP_CM /* dump CM memories (0/1) */,
DBG_GRC_PARAM_DUMP_PHY /* dump PHY memories (0/1) */,
+ DBG_GRC_PARAM_NO_MCP /* don't perform MCP commands (0/1) */,
+ DBG_GRC_PARAM_NO_FW_VER /* don't read FW/MFW version (0/1) */,
MAX_DBG_GRC_PARAMS
};
@@ -975,7 +1061,10 @@ enum dbg_status {
DBG_STATUS_REG_FIFO_BAD_DATA,
DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_MULTI_BLOCKS_WITH_FILTER,
+ DBG_STATUS_FILTER_BUG,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INVALID_TRIGGER_DWORD_OFFSET,
+ DBG_STATUS_DBG_BUS_IN_USE,
MAX_DBG_STATUS
};
@@ -1014,9 +1103,9 @@ struct dbg_tools_data {
struct idle_chk_data idle_chk /* Idle Check data */;
u8 mode_enable[40] /* Indicates if a mode is enabled (0/1) */;
/* Indicates if a block is in reset state (0/1) */
- u8 block_in_reset[80];
+ u8 block_in_reset[88];
u8 chip_id /* Chip ID (from enum chip_ids) */;
- u8 platform_id /* Platform ID (from enum platform_ids) */;
+ u8 platform_id /* Platform ID */;
u8 initialized /* Indicates if the data was initialized */;
u8 reserved;
};
diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h
index e26c1833..397c408d 100644
--- a/drivers/net/qede/base/ecore_hsi_eth.h
+++ b/drivers/net/qede/base/ecore_hsi_eth.h
@@ -34,315 +34,315 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct xstorm_eth_conn_ag_ctx {
+struct e4_xstorm_eth_conn_ag_ctx {
u8 reserved0 /* cdu_validation */;
u8 eth_state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
/* exist_in_qm1 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
/* exist_in_qm2 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
/* exist_in_qm3 */
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
/* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
/* cf_array_active */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
/* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
/* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
/* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
/* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
/* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
/* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
/* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
/* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
/* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
/* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
/* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
/* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
/* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
/* timer_stop_all */
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
/* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
/* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
/* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
/* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
/* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
/* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
/* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
/* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
/* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
/* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
/* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
/* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
/* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
/* cf_array_cf */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
/* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
/* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
/* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
/* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
/* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
/* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
/* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
/* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
/* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
/* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
/* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
/* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
/* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
/* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
/* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
/* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
/* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
/* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
/* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
/* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
/* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
/* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
/* cf_array_cf_en */
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
/* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
/* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
/* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
/* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
/* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
/* cf23en */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
/* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
/* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
/* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
/* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
/* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
/* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
/* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
/* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
/* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
/* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
/* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
/* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
/* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
/* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
/* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
/* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
/* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
/* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
/* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
/* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
/* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
/* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
/* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
/* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
/* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
/* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
/* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
/* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
/* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
/* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
/* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
/* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
/* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id /* byte2 */;
__le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
+ __le16 e5_reserved1 /* physical_q1 */;
__le16 edpm_num_bds /* physical_q2 */;
__le16 tx_bd_cons /* word3 */;
__le16 tx_bd_prod /* word4 */;
@@ -375,7 +375,7 @@ struct xstorm_eth_conn_ag_ctx {
u8 byte13 /* byte13 */;
u8 byte14 /* byte14 */;
u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
+ u8 e5_reserved /* e5_reserved */;
__le16 word11 /* word11 */;
__le32 reg10 /* reg10 */;
__le32 reg11 /* reg11 */;
@@ -400,47 +400,47 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct ystorm_eth_conn_ag_ctx {
+struct e4_ystorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 state /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
/* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
/* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
/* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
/* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
/* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
/* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
/* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
/* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
/* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* word0 */;
@@ -454,89 +454,89 @@ struct ystorm_eth_conn_ag_ctx {
__le32 reg3 /* reg3 */;
};
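
Every one of the renamed *_MASK/*_SHIFT pairs above follows the same packing idiom: a field occupies mask-width bits of the enclosing flagsN byte, starting at the shift. The ecore headers supply their own accessor macros built on these pairs; the standalone sketch below uses illustrative macro and field names (not the driver's) and shows the idiom with a 2-bit CF2 field at bits 7:6 of flags0, as in e4_ystorm_eth_conn_ag_ctx:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-ins for a <NAME>_MASK / <NAME>_SHIFT pair */
#define EX_CF2_MASK   0x3   /* 2-bit field */
#define EX_CF2_SHIFT  6     /* bits 7:6 of the flags byte */

/* generic set/get helpers in the style the driver builds on these pairs */
#define FIELD_SET(var, name, val) \
	((var) = (uint8_t)(((var) & ~((name##_MASK) << (name##_SHIFT))) | \
			   (((val) & (name##_MASK)) << (name##_SHIFT))))
#define FIELD_GET(var, name) \
	(((var) >> (name##_SHIFT)) & (name##_MASK))

int main(void)
{
	uint8_t flags0 = 0;

	FIELD_SET(flags0, EX_CF2, 0x2);   /* program the 2-bit field */
	printf("flags0=0x%02x cf2=%u\n", flags0, (unsigned)FIELD_GET(flags0, EX_CF2));
	return 0;
}
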
-struct tstorm_eth_conn_ag_ctx {
+struct e4_tstorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
+#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0 /* reg0 */;
__le32 reg1 /* reg1 */;
__le32 reg2 /* reg2 */;
@@ -558,88 +558,88 @@ struct tstorm_eth_conn_ag_ctx {
__le32 reg10 /* reg10 */;
};
-struct ustorm_eth_conn_ag_ctx {
+struct e4_ustorm_eth_conn_ag_ctx {
u8 byte0 /* cdu_validation */;
u8 byte1 /* state */;
u8 flags0;
/* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
/* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
/* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
/* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
/* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
/* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
/* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
/* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
/* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
/* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
/* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
/* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
/* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
/* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
/* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
/* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
/* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
/* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
/* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
/* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
/* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
/* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
/* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
/* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
/* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2 /* byte2 */;
u8 byte3 /* byte3 */;
__le16 word0 /* conn_dpi */;
@@ -678,15 +678,15 @@ struct eth_conn_context {
/* xstorm storm context */
struct xstorm_eth_conn_st_ctx xstorm_st_context;
/* xstorm aggregative context */
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
/* ystorm storm context */
struct ystorm_eth_conn_st_ctx ystorm_st_context;
/* ystorm aggregative context */
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
/* tstorm aggregative context */
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
/* ustorm aggregative context */
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
/* ustorm storm context */
struct ustorm_eth_conn_st_ctx ustorm_st_context;
/* mstorm storm context */
@@ -739,6 +739,7 @@ enum eth_error_code {
ETH_FILTERS_VNI_ADD_FAIL_FULL,
/* vni add filters command failed due to duplicate VNI filter */
ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL /* Fail update GFT filter. */,
MAX_ETH_ERROR_CODE
};
@@ -982,8 +983,10 @@ struct eth_vport_rss_config {
u8 rss_id;
u8 rss_mode /* The RSS mode for this function */;
u8 update_rss_key /* if set update the rss key */;
- u8 update_rss_ind_table /* if set update the indirection table */;
- u8 update_rss_capabilities /* if set update the capabilities */;
+/* if set update the indirection table values */
+ u8 update_rss_ind_table;
+/* if set update the capabilities and indirection table size. */
+ u8 update_rss_capabilities;
u8 tbl_size /* rss mask (Tbl size) */;
__le32 reserved2[2];
/* RSS indirection table */
@@ -1267,7 +1270,10 @@ struct rx_update_gft_filter_data {
/* Use enum to set type of flow using gft HW logic blocks */
u8 filter_type;
u8 filter_action /* Use to set type of action on filter */;
- u8 reserved;
+/* 0 - don't assert in case of error. Just return an error code. 1 - assert in
+ * case of error.
+ */
+ u8 assert_on_error;
};
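
The reserved byte in rx_update_gft_filter_data becomes assert_on_error, letting the caller choose between getting an error code back and a firmware assert when a GFT filter update fails (matching the new ETH_FILTERS_GFT_UPDATE_FAIL code above). A hedged sketch of how a ramrod builder might fill the visible fields; the mirror struct omits the members not shown in this hunk and the _sketch names are illustrative:

#include <stdint.h>
#include <string.h>

struct gft_update_sketch {
	/* ...leading members elided in this hunk... */
	uint8_t filter_type;     /* flow type handled by the GFT HW logic blocks */
	uint8_t filter_action;   /* action to apply on the filter */
	uint8_t assert_on_error; /* 0: return an error code, 1: assert on error */
};

static void gft_update_fill(struct gft_update_sketch *data,
			    uint8_t type, uint8_t action)
{
	memset(data, 0, sizeof(*data));
	data->filter_type = type;
	data->filter_action = action;
	data->assert_on_error = 0; /* prefer an error code over a firmware assert */
}
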
@@ -1446,7 +1452,15 @@ struct vport_update_ramrod_data_cmn {
/* If set, MTU will be updated. Vport must not be active. */
u8 update_mtu_flg;
	__le16 mtu /* New MTU value. Used if update_mtu_flg is set */;
- u8 reserved[2];
+/* If set, ctl_frame_mac_check_en and ctl_frame_ethtype_check_en will be
+ * updated
+ */
+ u8 update_ctl_frame_checks_en_flg;
+/* If set, Control frames will be filtered according to MAC check. */
+ u8 ctl_frame_mac_check_en;
+/* If set, Control frames will be filtered according to ethtype check. */
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[15];
};
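
The vport-update common data likewise trades part of its reserved space for an explicit update flag plus the two control-frame check enables; the flag gates whether the enables are applied at all. A short sketch follows (the mirror struct and helper are illustrative; only the field names match the hunk above):

#include <stdint.h>

struct vport_ctl_checks_sketch {
	uint8_t update_ctl_frame_checks_en_flg; /* apply the two enables below? */
	uint8_t ctl_frame_mac_check_en;         /* filter control frames by MAC check */
	uint8_t ctl_frame_ethtype_check_en;     /* filter control frames by ethtype check */
};

static void vport_set_ctl_frame_checks(struct vport_ctl_checks_sketch *cmn,
					uint8_t mac_check, uint8_t ethtype_check)
{
	cmn->update_ctl_frame_checks_en_flg = 1; /* request the update */
	cmn->ctl_frame_mac_check_en = mac_check;
	cmn->ctl_frame_ethtype_check_en = ethtype_check;
}
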
struct vport_update_ramrod_mcast {
@@ -1472,6 +1486,668 @@ struct vport_update_ramrod_data {
+struct E4XstormEthConnAgCtxDqExtLdPart {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+/* bit6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+/* bit7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+/* bit8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+/* bit9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+/* bit12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+/* bit13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+/* bit14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+/* timer1cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+/* timer2cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+/* cf4 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+/* cf5 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+/* cf6 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+/* cf7 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+/* cf8 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+/* cf9 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+/* cf10 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+/* cf11 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+/* cf12 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+/* cf13 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+/* cf14 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+/* cf15 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+/* cf16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+/* cf18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+/* cf19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+/* cf20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+/* cf21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+/* cf22 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+/* cf1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+/* cf3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+/* cf4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+/* cf5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+/* cf6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+/* cf7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+/* cf8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+/* cf9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+/* cf11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+/* cf12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+/* cf13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+/* cf14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+/* cf15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+/* cf16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+/* cf23 */
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+ u8 byte3 /* byte3 */;
+ u8 byte4 /* byte4 */;
+ u8 byte5 /* byte5 */;
+ u8 byte6 /* byte6 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+ __le32 reg2 /* reg2 */;
+ __le32 reg3 /* reg3 */;
+ __le32 reg4 /* reg4 */;
+};
+
+
+struct e4_mstorm_eth_conn_ag_ctx {
+ u8 byte0 /* cdu_validation */;
+ u8 byte1 /* state */;
+ u8 flags0;
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
+#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0 /* word0 */;
+ __le16 word1 /* word1 */;
+ __le32 reg0 /* reg0 */;
+ __le32 reg1 /* reg1 */;
+};
+
+
+
+
+
+struct e4_xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0 /* cdu_validation */;
+ u8 eth_state /* state */;
+ u8 flags0;
+/* exist_in_qm0 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+/* exist_in_qm1 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+/* exist_in_qm2 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+/* exist_in_qm3 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+/* bit4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+/* cf_array_active */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+/* bit10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+/* bit11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+/* bit12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
+/* bit13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
+/* bit14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+/* bit15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+/* timer0cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+/* timer1cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+/* timer2cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+/* timer_stop_all */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+/* cf_array_cf */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+/* cf0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+/* cf1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+/* cf2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+/* cf3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+/* cf4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+/* cf5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+/* cf6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+/* cf7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+/* cf8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+/* cf9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+/* cf10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+/* cf11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+/* cf12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+/* cf13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+/* cf14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+/* cf15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+/* cf16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+/* cf_array_cf_en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+/* cf18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+/* cf19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+/* cf20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+/* cf21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+/* cf22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+/* cf23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+/* rule0en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+/* rule1en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+/* rule2en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+/* rule3en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+/* rule4en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+/* rule5en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+/* rule6en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+/* rule7en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+/* rule8en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+/* rule9en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+/* rule10en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+/* rule11en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+/* rule12en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+/* rule13en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+/* rule14en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+/* rule15en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+/* rule16en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+/* rule17en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+/* rule18en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+/* rule19en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+/* rule20en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+/* rule21en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+/* rule22en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+/* rule23en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+/* rule24en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+/* rule25en */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+/* bit16 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+/* bit17 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+/* bit18 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+/* bit19 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+/* bit20 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+/* bit21 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
+#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id /* byte2 */;
+ __le16 physical_q0 /* physical_q0 */;
+ __le16 e5_reserved1 /* physical_q1 */;
+ __le16 edpm_num_bds /* physical_q2 */;
+ __le16 tx_bd_cons /* word3 */;
+ __le16 tx_bd_prod /* word4 */;
+ __le16 tx_class /* word5 */;
+ __le16 conn_dpi /* conn_dpi */;
+};
+
+
+
/*
* GFT CAM line struct
*/
@@ -1620,8 +2296,7 @@ enum gft_profile_upper_protocol_type {
* GFT RAM line struct
*/
struct gft_ram_line {
- __le32 low32bits;
-/* (use enum gft_vlan_select) */
+ __le32 lo;
#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
@@ -1684,7 +2359,7 @@ struct gft_ram_line {
#define GFT_RAM_LINE_DST_PORT_SHIFT 30
#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
- __le32 high32bits;
+ __le32 hi;
#define GFT_RAM_LINE_DSCP_MASK 0x1
#define GFT_RAM_LINE_DSCP_SHIFT 0
#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
@@ -1722,690 +2397,4 @@ enum gft_vlan_select {
};
-struct mstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */
-#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */
-#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
- u8 flags1;
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- __le16 word0 /* word0 */;
- __le16 word1 /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
-};
-
-
-
-
-struct xstormEthConnAgCtxDqExtLdPart {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
-/* exist_in_qm2 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
-/* exist_in_qm3 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
-/* bit4 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
-/* cf_array_active */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
-/* bit6 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
-/* bit7 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
- u8 flags1;
-/* bit8 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
-/* bit9 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
-/* bit10 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
-/* bit11 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
-/* bit12 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
-/* bit13 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
-/* bit14 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
-/* bit15 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-/* timer0cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
-/* timer1cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
-/* timer2cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
-/* timer_stop_all */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
- u8 flags3;
-/* cf4 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
-/* cf5 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
-/* cf6 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
-/* cf7 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
- u8 flags4;
-/* cf8 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
-/* cf9 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
-/* cf10 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
-/* cf11 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
- u8 flags5;
-/* cf12 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
-/* cf13 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
-/* cf14 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
-/* cf15 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
- u8 flags6;
-/* cf16 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
-/* cf_array_cf */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
-/* cf18 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
-/* cf19 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
- u8 flags7;
-/* cf20 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
-/* cf21 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
-/* cf22 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
-/* cf0en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
-/* cf1en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
- u8 flags8;
-/* cf2en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
-/* cf3en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
-/* cf4en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
-/* cf5en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
-/* cf6en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
-/* cf7en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
-/* cf8en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
-/* cf9en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
- u8 flags9;
-/* cf10en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
-/* cf11en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
-/* cf12en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
-/* cf13en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
-/* cf14en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
-/* cf15en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
-/* cf16en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-/* cf18en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
-/* cf19en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
-/* cf20en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
-/* cf21en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
-/* cf22en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
-/* cf23en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
-/* rule1en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
- u8 flags11;
-/* rule2en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
-/* rule3en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
-/* rule4en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
-/* rule5en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
-/* rule6en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
-/* rule7en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
-/* rule8en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
-/* rule9en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
- u8 flags12;
-/* rule10en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
-/* rule11en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
-/* rule12en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
-/* rule13en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
-/* rule14en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
-/* rule15en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
-/* rule16en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
-/* rule17en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
- u8 flags13;
-/* rule18en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
-/* rule19en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
-/* rule20en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
-/* rule21en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
-/* rule22en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
-/* rule23en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
-/* rule24en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
-/* rule25en */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
- u8 flags14;
-/* bit16 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
-/* bit17 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
-/* bit18 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
-/* bit19 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-/* bit20 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
-/* bit21 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
-/* cf23 */
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
-#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
-};
-
-
-
-struct xstorm_eth_hw_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-/* exist_in_qm0 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-/* exist_in_qm1 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-/* exist_in_qm2 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-/* exist_in_qm3 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-/* bit4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-/* cf_array_active */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-/* bit6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-/* bit7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-/* bit8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-/* bit9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-/* bit10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-/* bit11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-/* bit12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4
-/* bit13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5
-/* bit14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-/* bit15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
- u8 flags2;
-/* timer0cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-/* timer1cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-/* timer2cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-/* timer_stop_all */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
- u8 flags3;
-/* cf4 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-/* cf5 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-/* cf6 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-/* cf7 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-/* cf8 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-/* cf9 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-/* cf10 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-/* cf11 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
- u8 flags5;
-/* cf12 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-/* cf13 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-/* cf14 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-/* cf15 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
- u8 flags6;
-/* cf16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-/* cf_array_cf */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-/* cf18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-/* cf19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
- u8 flags7;
-/* cf20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-/* cf21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-/* cf22 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-/* cf0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-/* cf1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
- u8 flags8;
-/* cf2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-/* cf3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-/* cf4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-/* cf5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-/* cf6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-/* cf7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-/* cf8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-/* cf9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
- u8 flags9;
-/* cf10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-/* cf11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-/* cf12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-/* cf13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-/* cf14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-/* cf15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-/* cf16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-/* cf_array_cf_en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
- u8 flags10;
-/* cf18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-/* cf19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-/* cf20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-/* cf21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-/* cf22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-/* cf23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-/* rule0en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-/* rule1en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
- u8 flags11;
-/* rule2en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-/* rule3en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-/* rule4en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-/* rule5en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-/* rule6en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-/* rule7en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-/* rule8en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-/* rule9en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
- u8 flags12;
-/* rule10en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-/* rule11en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-/* rule12en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-/* rule13en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-/* rule14en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-/* rule15en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-/* rule16en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-/* rule17en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
- u8 flags13;
-/* rule18en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-/* rule19en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-/* rule20en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-/* rule21en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-/* rule22en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-/* rule23en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-/* rule24en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-/* rule25en */
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
- u8 flags14;
-/* bit16 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-/* bit17 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-/* bit18 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-/* bit19 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-/* bit20 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-/* bit21 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-/* cf23 */
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 quota /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 tx_class /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
-};
-
-
#endif /* __ECORE_HSI_ETH__ */
diff --git a/drivers/net/qede/base/ecore_hsi_init_tool.h b/drivers/net/qede/base/ecore_hsi_init_tool.h
index 410b0bcb..1f57e9b2 100644
--- a/drivers/net/qede/base/ecore_hsi_init_tool.h
+++ b/drivers/net/qede/base/ecore_hsi_init_tool.h
@@ -22,6 +22,13 @@
/* Max size in dwords of a zipped array */
#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ CHIP_E5,
+ MAX_CHIP_IDS
+};
+
struct fw_asserts_ram_section {
/* The offset of the section in the RAM in RAM lines (64-bit units) */
@@ -69,51 +76,6 @@ struct fw_info_location {
__le32 size;
};
-
-
-
-enum init_modes {
- MODE_BB_A0,
- MODE_BB_B0,
- MODE_K2,
- MODE_ASIC,
- MODE_EMUL_REDUCED,
- MODE_EMUL_FULL,
- MODE_FPGA,
- MODE_CHIPSIM,
- MODE_SF,
- MODE_MF_SD,
- MODE_MF_SI,
- MODE_PORTS_PER_ENG_1,
- MODE_PORTS_PER_ENG_2,
- MODE_PORTS_PER_ENG_4,
- MODE_100G,
- MODE_40G,
- MODE_EAGLE_ENG1_WORKAROUND,
- MAX_INIT_MODES
-};
-
-
-enum init_phases {
- PHASE_ENGINE,
- PHASE_PORT,
- PHASE_PF,
- PHASE_VF,
- PHASE_QM_PF,
- MAX_INIT_PHASES
-};
-
-
-enum init_split_types {
- SPLIT_TYPE_NONE,
- SPLIT_TYPE_PORT,
- SPLIT_TYPE_PF,
- SPLIT_TYPE_PORT_PF,
- SPLIT_TYPE_VF,
- MAX_INIT_SPLIT_TYPES
-};
-
-
/*
* Binary buffer header
*/
@@ -204,8 +166,46 @@ union init_array_hdr {
};
+enum init_modes {
+ MODE_BB_A0_DEPRECATED,
+ MODE_BB,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_E5,
+ MAX_INIT_MODES
+};
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
/*
* init array types
diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c
index 7f4db0a0..2bcc32d3 100644
--- a/drivers/net/qede/base/ecore_hw.c
+++ b/drivers/net/qede/base/ecore_hw.c
@@ -86,7 +86,6 @@ void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
if (p_hwfn->p_ptt_pool)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
- p_hwfn->p_ptt_pool = OSAL_NULL;
}
struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
@@ -496,8 +495,8 @@ static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
return DMAE_REG_GO_C0 + (idx << 2);
}
-static enum _ecore_status_t
-ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+static enum _ecore_status_t ecore_dmae_post_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
u8 idx_cmd = p_hwfn->dmae_info.channel, i;
@@ -774,6 +773,18 @@ ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_status = ECORE_SUCCESS;
u32 offset = 0;
+ if (p_hwfn->p_dev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%lx, type %d}, {dst: addr 0x%lx, type %d}, size %d].\n",
+ (unsigned long)src_addr, src_type,
+ (unsigned long)dst_addr, dst_type,
+ size_in_dwords);
+ /* Return success to let the flow be completed successfully
+ * w/o any error handling.
+ */
+ return ECORE_SUCCESS;
+ }
+
ecore_dmae_opcode(p_hwfn,
(src_type == ECORE_DMAE_ADDRESS_GRC),
(dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);
@@ -906,44 +917,6 @@ ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
return rc;
}
-u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
- enum protocol_type proto,
- union ecore_qm_pq_params *p_params)
-{
- u16 pq_id = 0;
-
- if ((proto == PROTOCOLID_CORE ||
- proto == PROTOCOLID_ETH) && !p_params) {
- DP_NOTICE(p_hwfn, true,
- "Protocol %d received NULL PQ params\n", proto);
- return 0;
- }
-
- switch (proto) {
- case PROTOCOLID_CORE:
- if (p_params->core.tc == LB_TC)
- pq_id = p_hwfn->qm_info.pure_lb_pq;
- else if (p_params->core.tc == OOO_LB_TC)
- pq_id = p_hwfn->qm_info.ooo_pq;
- else
- pq_id = p_hwfn->qm_info.offload_pq;
- break;
- case PROTOCOLID_ETH:
- pq_id = p_params->eth.tc;
- /* TODO - multi-CoS for VFs? */
- if (p_params->eth.is_vf)
- pq_id += p_hwfn->qm_info.vf_queues_offset +
- p_params->eth.vf_id;
- break;
- default:
- pq_id = 0;
- }
-
- pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
-
- return pq_id;
-}
-
void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
enum ecore_hw_err_type err_type)
{
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c
index de08650b..004ab351 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.c
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.c
@@ -17,112 +17,156 @@
#include "ecore_hsi_init_tool.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
-enum CmInterfaceEnum {
- MCM_SEC,
- MCM_PRI,
- UCM_SEC,
- UCM_PRI,
- TCM_SEC,
- TCM_PRI,
- YCM_SEC,
- YCM_PRI,
- XCM_SEC,
- XCM_PRI,
- NUM_OF_CM_INTERFACES
+
+#define CDU_VALIDATION_DEFAULT_CFG 61
+
+static u16 con_region_offsets[3][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 400, 336, 352, 304, 304, 384, 416, 352}, /* region 3 offsets */
+ { 528, 496, 416, 448, 448, 512, 544, 480}, /* region 4 offsets */
+ { 608, 544, 496, 512, 576, 592, 624, 560} /* region 5 offsets */
+};
+static u16 task_region_offsets[1][E4_NUM_OF_CONNECTION_TYPES] = {
+ { 240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
-/* general constants */
-#define QM_PQ_MEM_4KB(pq_size) \
-(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
-#define QM_PQ_SIZE_256B(pq_size) \
-(pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : 0)
-#define QM_INVALID_PQ_ID 0xffff
-/* feature enable */
-#define QM_BYPASS_EN 1
-#define QM_BYTE_CRD_EN 1
-/* other PQ constants */
-#define QM_OTHER_PQS_PER_PF 4
-/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 62500000
+
+/* General constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
+ 0)
+#define QM_INVALID_PQ_ID 0xffff
+
+/* Feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
+/* Other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+
+/* WFQ constants: */
+
+/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_WFQ_UPPER_BOUND 62500000
+
+/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5
+
+/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 43750000
-/* RL constants */
-#define QM_RL_UPPER_BOUND 62500000
-#define QM_RL_PERIOD 5
+
+/* 0.7 * upper bound (62500000) */
+#define QM_WFQ_MAX_INC_VAL 43750000
+
+/* RL constants: */
+
+/* Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_RL_UPPER_BOUND 62500000
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
-#define QM_RL_MAX_INC_VAL 43750000
-/* RL increment value - the factor of 1.01 was added after seeing only
- * 99% factor reached in a 25Gbps port with DPDK RFC 2544 test.
- * In this scenario the PF RL was reducing the line rate to 99% although
- * the credit increment value was the correct one and FW calculated
- * correct packet sizes. The reason for the inaccuracy of the RL is
- * unknown at this point.
+
+/* 0.7 * upper bound (62500000) */
+#define QM_RL_MAX_INC_VAL 43750000
+
+/* RL increment value - rate is specified in Mbps. The factor of 1.01 was
+ * added after seeing only 99% factor reached in a 25Gbps port with DPDK RFC
+ * 2544 test. In this scenario the PF RL was reducing the line rate to 99%
+ * although the credit increment value was the correct one and FW calculated
+ * correct packet sizes. The reason for the inaccuracy of the RL is unknown at
+ * this point.
*/
-/* rate in mbps */
#define QM_RL_INC_VAL(rate) OSAL_MAX_T(u32, (u32)(((rate ? rate : 1000000) * \
- QM_RL_PERIOD * 101) / (8 * 100)), 1)
+ QM_RL_PERIOD * 101) / (8 * 100)), 1)
+
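+/* Worked example (assuming a 25 Gbps port, i.e. rate = 25000 Mbps):
+ * 25000 Mbps over one QM_RL_PERIOD of 5 us is 125000 bits = 15625 bytes;
+ * applying the 1.01 factor, (25000 * 5 * 101) / (8 * 100) = 15781 is the
+ * credit increment value programmed for that rate.
+ */
+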
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-/* Command Queue constants */
-#define PBF_CMDQ_PURE_LB_LINES 150
+
+/* Command Queue constants: */
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
#define PBF_CMDQ_LINES_RT_OFFSET(voq) \
-(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
-voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \
-- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \
-(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
-(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
+ (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
/* BTB: blocks constants (block size = 256B) */
-#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */
-/* headroom per-port */
-#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
-#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
/* QM stop command constants */
-#define QM_STOP_PQ_MASK_WIDTH 32
-#define QM_STOP_CMD_ADDR 0x2
-#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
-#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
-#define QM_STOP_CMD_GROUP_ID_OFFSET 1
-#define QM_STOP_CMD_GROUP_ID_SHIFT 16
-#define QM_STOP_CMD_GROUP_ID_MASK 15
-#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
-#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
-#define QM_STOP_CMD_PQ_TYPE_MASK 1
-#define QM_STOP_CMD_MAX_POLL_COUNT 100
-#define QM_STOP_CMD_POLL_PERIOD_US 500
+#define QM_STOP_CMD_PAUSE_MASK_MASK 0xffffffff /* @DPDK */
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
/* QM command macros */
-#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
+#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
-SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+ SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
+
/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \
-((port) * (max_phys_tcs_per_port) + (tc))
-#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
+ ((port) * (max_phys_tcs_per_port) + (tc))
+#define LB_VOQ(port) (MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phys_tcs_per_port) \
-((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port))
+ ((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : \
+ LB_VOQ(port))
+
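+/* Worked example (assuming max_phys_tcs_per_port = 4 and a physical TC
+ * below LB_TC): port 1, tc 2 maps to PHYS_VOQ(1, 2, 4) = 1 * 4 + 2 = 6,
+ * while the pure loopback TC of the same port maps to
+ * LB_VOQ(1) = MAX_PHYS_VOQS + 1.
+ */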
+
/******************** INTERNAL IMPLEMENTATION *********************/
+
/* Prepare PF RL enable/disable runtime init values */
static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- /* enable RLs for all VOQs */
+ /* Enable RLs for all VOQs */
STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
(1 << MAX_NUM_VOQS) - 1);
- /* write RL period */
+
+ /* Write RL period */
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
QM_RL_UPPER_BOUND);
@@ -133,7 +177,8 @@ static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
@@ -145,12 +190,13 @@ static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
if (vport_rl_en) {
- /* write RL period (use timer 0 only) */
+ /* Write RL period (use timer 0 only) */
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
@@ -163,7 +209,8 @@ static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
- /* set credit threshold for QM bypass flow */
+
+ /* Set credit threshold for QM bypass flow */
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
QM_WFQ_UPPER_BOUND);
@@ -176,13 +223,9 @@ static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
- /* In A0 - Limit the size of pbf queue so that only 511 commands
- * with the minimum size of 4 (FCoE minimum size)
- */
- bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
- if (is_bb_a0)
- cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022);
+
qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+
OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq),
(u32)cmdq_lines);
STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd);
@@ -198,38 +241,43 @@ static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
port_params[MAX_NUM_PORTS])
{
u8 tc, voq, port_id, num_tcs_in_port;
- /* clear PBF lines for all VOQs */
+
+ /* Clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0);
+
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- u16 phys_lines, phys_lines_per_tc;
- /* find #lines to divide between active physical TCs */
- phys_lines =
- port_params[port_id].num_pbf_cmd_lines -
- PBF_CMDQ_PURE_LB_LINES;
- /* find #lines per active physical TC */
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- }
- phys_lines_per_tc = phys_lines / num_tcs_in_port;
- /* init registers per active TC */
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- ecore_cmdq_lines_voq_rt_init(p_hwfn,
- voq, phys_lines_per_tc);
- }
+ u16 phys_lines, phys_lines_per_tc;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Find #lines to divide between the active physical TCs */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines -
+ PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, voq,
+ phys_lines_per_tc);
}
- /* init registers for pure LB TC */
- ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
- PBF_CMDQ_PURE_LB_LINES);
}
+
+ /* Init registers for pure LB TC */
+ ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
+ PBF_CMDQ_PURE_LB_LINES);
}
}
@@ -259,50 +307,51 @@ static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id, num_tcs_in_port;
u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, voq, port_id, num_tcs_in_port;
+
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
- if (port_params[port_id].active) {
- /* subtract headroom blocks */
- usable_blocks =
- port_params[port_id].num_btb_blocks -
- BTB_HEADROOM_BLOCKS;
-/* find blocks per physical TC. use factor to avoid floating arithmethic */
-
- num_tcs_in_port = 0;
- for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1)
- num_tcs_in_port++;
- pure_lb_blocks =
- (usable_blocks * BTB_PURE_LB_FACTOR) /
- (num_tcs_in_port *
- BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO);
- pure_lb_blocks =
- OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
- pure_lb_blocks / BTB_PURE_LB_FACTOR);
- phys_blocks =
- (usable_blocks -
- pure_lb_blocks) /
- num_tcs_in_port;
- /* init physical TCs */
- for (tc = 0;
- tc < NUM_OF_PHYS_TCS;
- tc++) {
- if (((port_params[port_id].active_phys_tcs >>
- tc) & 0x1) == 1) {
- voq = PHYS_VOQ(port_id, tc,
- max_phys_tcs_per_port);
- STORE_RT_REG(p_hwfn,
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. Use factor to avoid floating-point
+ * arithmetic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks /
+ BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >> tc) &
+ 0x1) == 1) {
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
- }
}
- /* init pure LB TC */
- STORE_RT_REG(p_hwfn,
- PBF_BTB_GUARANTEED_RT_OFFSET(
- LB_VOQ(port_id)), pure_lb_blocks);
}
+
+ /* Init pure LB TC */
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ(port_id)),
+ pure_lb_blocks);
}
}
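/* Illustrative arithmetic for the scaled division above, assuming
 * BTB_PURE_LB_FACTOR = 10 and BTB_PURE_LB_RATIO = 7 (hypothetical values, not
 * shown in this hunk), with usable_blocks = 1000 and num_tcs_in_port = 4:
 *
 *   pure_lb_blocks = (1000 * 10) / (4 * 10 + 7) = 212
 *   pure_lb_blocks = max(BTB_JUMBO_PKT_BLOCKS, 212 / 10) = 21, provided the
 *                    jumbo-packet minimum is below 21
 *   phys_blocks    = (1000 - 21) / 4 = 244
 *
 * The factor keeps the whole computation in integer arithmetic.
 */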
@@ -323,59 +372,69 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
- u16 i, pq_id, pq_group;
- u16 num_pqs = num_pf_pqs + num_vf_pqs;
- u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
- u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
- bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev);
- /* a bit per Tx PQ indicating if the PQ is associated with a VF */
+ /* A bit per Tx PQ indicating if the PQ is associated with a VF */
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
- u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE;
- u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
- u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
- u32 mem_addr_4kb = base_mem_addr_4kb;
- /* set mapping from PQ group to PF */
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
+ u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = num_pf_pqs + num_vf_pqs;
+
+ first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Set mapping from PQ group to PF */
for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
QM_PQ_SIZE_256B(num_pf_cids));
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
QM_PQ_SIZE_256B(num_vf_cids));
- /* go over all Tx PQs */
+
+ /* Go over all Tx PQs */
for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
- struct qm_rf_pq_map tx_pq_map;
- u8 voq =
- VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
- bool is_vf_pq = (i >= num_pf_pqs);
- /* added to avoid compilation warning */
u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
- bool rl_valid = pq_params[i].rl_valid &&
- pq_params[i].vport_id < max_qm_global_rls;
- /* update first Tx PQ of VPORT/TC */
- u8 vport_id_in_pf = pq_params[i].vport_id - start_vport;
- u16 first_tx_pq_id =
- vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].
- tc_id];
+ struct qm_rf_pq_map tx_pq_map;
+ bool is_vf_pq, rl_valid;
+ u8 voq, vport_id_in_pf;
+ u16 first_tx_pq_id;
+
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ is_vf_pq = (i >= num_pf_pqs);
+ rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
+ max_qm_global_rls;
+
+ /* Update first Tx PQ of VPORT/TC */
+ vport_id_in_pf = pq_params[i].vport_id - start_vport;
+ first_tx_pq_id =
+ vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
if (first_tx_pq_id == QM_INVALID_PQ_ID) {
- /* create new VP PQ */
+ /* Create new VP PQ */
vport_params[vport_id_in_pf].
first_tx_pq_id[pq_params[i].tc_id] = pq_id;
first_tx_pq_id = pq_id;
- /* map VP PQ to VOQ and PF */
+
+ /* Map VP PQ to VOQ and PF */
STORE_RT_REG(p_hwfn,
QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id,
(voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id <<
QM_WFQ_VP_PQ_PF_SHIFT));
}
- /* check RL ID */
+
+ /* Check RL ID */
if (pq_params[i].rl_valid && pq_params[i].vport_id >=
max_qm_global_rls)
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter config");
- /* fill PQ map entry */
+ "Invalid VPORT ID for rate limiter config\n");
+
+ /* Fill PQ map entry */
OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
@@ -386,44 +445,30 @@ static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
pq_params[i].wrr_group);
- /* write PQ map entry to CAM */
+
+ /* Write PQ map entry to CAM */
STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id,
*((u32 *)&tx_pq_map));
- /* set base address */
+
+ /* Set base address */
STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
mem_addr_4kb);
- /* check if VF PQ */
+
+ /* If VF PQ, add indication to PQ VF mask */
if (is_vf_pq) {
- /* if PQ is associated with a VF, add indication to PQ
- * VF mask
- */
- tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |=
- (1 << (pq_id % tx_pq_vf_mask_width));
+ tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
+ (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
mem_addr_4kb += vport_pq_mem_4kb;
} else {
mem_addr_4kb += pq_mem_4kb;
}
}
- /* store Tx PQ VF mask to size select register */
- for (i = 0; i < num_tx_pq_vf_masks; i++) {
- if (tx_pq_vf_mask[i]) {
- if (is_bb_a0) {
- /* A0-only: perform read-modify-write
- *(fixed in B0)
- */
- u32 curr_mask =
- is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt,
- QM_REG_MAXPQSIZETXSEL_0
- + i * 4);
- STORE_RT_REG(p_hwfn,
- QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, curr_mask | tx_pq_vf_mask[i]);
- } else
- STORE_RT_REG(p_hwfn,
- QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
- i, tx_pq_vf_mask[i]);
- }
- }
+
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
+ i, tx_pq_vf_mask[i]);
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -433,20 +478,26 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
u32 num_pf_cids,
u32 num_tids, u32 base_mem_addr_4kb)
{
- u16 i, pq_id;
-/* a single other PQ grp is used in each PF, where PQ group i is used in PF i */
-
- u16 pq_group = pf_id;
- u32 pq_size = num_pf_cids + num_tids;
- u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
- u32 mem_addr_4kb = base_mem_addr_4kb;
- /* map PQ group to PF */
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, pq_id, pq_group;
+
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
+ */
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Map PQ group to PF */
STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
(u32)(pf_id));
- /* set PQ sizes */
+
+ /* Set PQ sizes */
STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
QM_PQ_SIZE_256B(pq_size));
- /* set base address */
+
+ /* Set base address */
for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
@@ -454,7 +505,10 @@ static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
mem_addr_4kb += pq_mem_4kb;
}
}
-/* Prepare PF WFQ runtime init values for specified PF. Return -1 on error. */
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
@@ -463,76 +517,89 @@ static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u16 num_tx_pqs,
struct init_qm_pq_params *pq_params)
{
+ u32 inc_val, crd_reg_offset;
+ u8 voq;
u16 i;
- u32 inc_val;
- u32 crd_reg_offset =
- (pf_id <
- MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
- QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB);
+
+ crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ? QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ (pf_id % MAX_NUM_PFS_BB);
+
inc_val = QM_WFQ_INC_VAL(pf_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
return -1;
}
+
for (i = 0; i < num_tx_pqs; i++) {
- u8 voq =
- VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
+ voq = VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port);
OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB,
(u32)QM_WFQ_CRD_REG_SIGN_BIT);
}
+
STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val);
return 0;
}
-/* Prepare PF RL runtime init values for specified PF. Return -1 on error. */
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
- u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
return -1;
}
+
STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
QM_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
return 0;
}
-/* Prepare VPORT WFQ runtime init values for the specified VPORTs. Return -1 on
- * error.
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
*/
static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
u8 num_vports,
struct init_qm_vport_params *vport_params)
{
- u8 tc, i;
+ u16 vport_pq_id;
u32 inc_val;
- /* go over all PF VPORTs */
+ u8 tc, i;
+
+ /* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (vport_params[i].vport_wfq) {
- inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight config");
- return -1;
- }
- /* each VPORT can have several VPORT PQ IDs for
- * different TCs
- */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id =
- vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET
- + vport_pq_id, inc_val);
- }
+ if (!vport_params[i].vport_wfq)
+ continue;
+
+ inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
+ if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID) {
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
}
@@ -548,19 +615,23 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_vport_params *vport_params)
{
u8 i, vport_id;
+ u32 inc_val;
+
if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
- /* go over all PF VPORTs */
+
+ /* Go over all PF VPORTs */
for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
+
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
(u32)QM_RL_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn,
@@ -569,6 +640,7 @@ static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
inc_val);
}
+
return 0;
}
@@ -576,17 +648,20 @@ static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
u32 reg_val, i;
- for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
i++) {
OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
}
- /* check if timeout while waiting for SDM command ready */
+
+ /* Check if timeout while waiting for SDM command ready */
if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
"Timeout waiting for QM SDM cmd ready signal\n");
return false;
}
+
return true;
}
@@ -596,15 +671,19 @@ static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
{
if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
+
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
}
+
/******************** INTERFACE IMPLEMENTATION *********************/
+
u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
@@ -625,32 +704,42 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_port_params
port_params[MAX_NUM_PORTS])
{
- /* init AFullOprtnstcCrdMask */
- u32 mask =
- (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
- (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
- (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
- (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
- (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
- (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
- (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
- (QM_OPPOR_PQ_EMPTY_DEF <<
- QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
+ u32 mask;
+
+ /* Init AFullOprtnstcCrdMask */
+ mask = (QM_OPPOR_LINE_VOQ_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
+ (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
+ (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
+ (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
+ (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
+ (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
+ (QM_OPPOR_FW_STOP_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
+ (QM_OPPOR_PQ_EMPTY_DEF <<
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
- /* enable/disable PF RL */
+
+ /* Enable/disable PF RL */
ecore_enable_pf_rl(p_hwfn, pf_rl_en);
- /* enable/disable PF WFQ */
+
+ /* Enable/disable PF WFQ */
ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
- /* enable/disable VPORT RL */
+
+ /* Enable/disable VPORT RL */
ecore_enable_vport_rl(p_hwfn, vport_rl_en);
- /* enable/disable VPORT WFQ */
+
+ /* Enable/disable VPORT WFQ */
ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
- /* init PBF CMDQ line credit */
+
+ /* Init PBF CMDQ line credit */
ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
- /* init BTB blocks in PBF */
+
+ /* Init BTB blocks in PBF */
ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
max_phys_tcs_per_port, port_params);
+
return 0;
}
@@ -673,66 +762,86 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params)
{
+ u32 other_mem_size_4kb;
u8 tc, i;
- u32 other_mem_size_4kb =
- QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
- /* clear first Tx PQ ID array for each VPORT */
+
+ other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
+ QM_OTHER_PQS_PER_PF;
+
+ /* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
- /* map Other PQs (if any) */
+
+ /* Map Other PQs (if any) */
#if QM_OTHER_PQS_PER_PF > 0
ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids,
num_tids, 0);
#endif
- /* map Tx PQs */
+
+ /* Map Tx PQs */
ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
max_phys_tcs_per_port, is_first_pf, num_pf_cids,
num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs,
start_vport, other_mem_size_4kb, pq_params,
vport_params);
- /* init PF WFQ */
+
+ /* Init PF WFQ */
if (pf_wfq)
if (ecore_pf_wfq_rt_init
(p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
- num_pf_pqs + num_vf_pqs, pq_params) != 0)
+ num_pf_pqs + num_vf_pqs, pq_params))
return -1;
- /* init PF RL */
- if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0)
+
+ /* Init PF RL */
+ if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
return -1;
- /* set VPORT WFQ */
- if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0)
+
+ /* Set VPORT WFQ */
+ if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
return -1;
- /* set VPORT RL */
+
+ /* Set VPORT RL */
if (ecore_vport_rl_rt_init
- (p_hwfn, start_vport, num_vports, vport_params) != 0)
+ (p_hwfn, start_vport, num_vports, vport_params))
return -1;
+
return 0;
}
int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration");
+ u32 inc_val;
+
+ inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF WFQ weight configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
return 0;
}
int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
- u32 inc_val = QM_RL_INC_VAL(pf_rl);
+ u32 inc_val;
+
+ inc_val = QM_RL_INC_VAL(pf_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration");
+ DP_NOTICE(p_hwfn, true,
+ "Invalid PF rate limit configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
return 0;
}
@@ -740,20 +849,25 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
{
+ u16 vport_pq_id;
+ u32 inc_val;
u8 tc;
- u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
- if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) {
+
+ inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT WFQ weight configuration");
+ "Invalid VPORT WFQ weight configuration\n");
return -1;
}
+
for (tc = 0; tc < NUM_OF_TCS; tc++) {
- u16 vport_pq_id = first_tx_pq_id[tc];
+ vport_pq_id = first_tx_pq_id[tc];
if (vport_pq_id != QM_INVALID_PQ_ID) {
ecore_wr(p_hwfn, p_ptt,
QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
}
}
+
return 0;
}
@@ -761,20 +875,24 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
+
if (vport_id >= max_qm_global_rls) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT ID for rate limiter configuration");
+ "Invalid VPORT ID for rate limiter configuration\n");
return -1;
}
+
inc_val = QM_RL_INC_VAL(vport_rl);
if (inc_val > QM_RL_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, true,
- "Invalid VPORT rate-limit configuration");
+ "Invalid VPORT rate-limit configuration\n");
return -1;
}
+
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
(u32)QM_RL_CRD_REG_SIGN_BIT);
ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
+
return 0;
}
@@ -784,15 +902,20 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
- u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
- /* set command's PQ type */
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
+
+ /* Set command's PQ type */
QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
- /* go over requested PQs */
+
+ /* Go over requested PQs */
for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
- /* set PQ bit in mask (stop command only) */
+ /* Set PQ bit in mask (stop command only) */
if (!is_release_cmd)
pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
- /* if last PQ or end of PQ mask, write command */
+
+ /* If last PQ or end of PQ mask, write command */
if ((pq_id == last_pq) ||
(pq_id % QM_STOP_PQ_MASK_WIDTH ==
(QM_STOP_PQ_MASK_WIDTH - 1))) {
@@ -807,68 +930,92 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
pq_mask = 0;
}
}
+
return true;
}
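/* Example of the PQ mask batching above, assuming QM_STOP_PQ_MASK_WIDTH = 32
 * (hypothetical value): for start_pq = 30 and num_pqs = 4 the loop visits PQs
 * 30..33. Bits 30 and 31 are set and flushed when pq_id % 32 == 31, then bits
 * 0 and 1 are set in a fresh mask and flushed when pq_id == last_pq. Each
 * flush issues one QM stop command covering at most one mask width of PQs.
 */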
+
/* NIG: ETS configuration constants */
#define NIG_TX_ETS_CLIENT_OFFSET 4
#define NIG_LB_ETS_CLIENT_OFFSET 1
#define NIG_ETS_MIN_WFQ_BYTES 1600
+
/* NIG: ETS constants */
#define NIG_ETS_UP_BOUND(weight, mtu) \
-(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
/* NIG: RL constants */
-#define NIG_RL_BASE_TYPE 1 /* byte base type */
-#define NIG_RL_PERIOD 1 /* in us */
+
+/* Byte base type value */
+#define NIG_RL_BASE_TYPE 1
+
+/* Period in us */
+#define NIG_RL_PERIOD 1
+
+/* Period in 25MHz cycles */
#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD)
+
+/* Rate in mbps */
#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8)
+
#define NIG_RL_MAX_VAL(inc_val, mtu) \
-(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+ (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
+
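/* Worked example of the RL constants above (rate in Mbit/s per the comment,
 * MTU value hypothetical): for rate = 10000 and NIG_RL_PERIOD = 1 us,
 *
 *   NIG_RL_INC_VAL(10000)      = (10000 * 1) / 8 = 1250 bytes per period
 *   NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200 bytes of headroom
 *
 * i.e. 1250 bytes per microsecond corresponds to 10 Gbit/s, and the bucket
 * upper bound is twice the larger of the increment value and the MTU.
 */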
/* NIG: packet prioritry configuration constants */
-#define NIG_PRIORITY_MAP_TC_BITS 4
+#define NIG_PRIORITY_MAP_TC_BITS 4
+
+
void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req, bool is_lb)
{
- u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
- u8 tc_client_offset =
- is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET;
- u32 min_weight = 0xffffffff;
- u32 tc_weight_base_addr =
- is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
- NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
- u32 tc_weight_addr_diff =
- is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
- NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
- NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
- u32 tc_bound_base_addr =
- is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
- u32 tc_bound_addr_diff =
- is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
- NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
- NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
+ u32 tc_bound_base_addr, tc_bound_addr_diff;
+ u8 sp_tc_map = 0, wfq_tc_map = 0;
+ u8 tc, num_tc, tc_client_offset;
+
+ num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
+ tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
+ NIG_TX_ETS_CLIENT_OFFSET;
+ min_weight = 0xffffffff;
+ tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
+ NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
+ tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+ tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
+ NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
+
for (tc = 0; tc < num_tc; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- /* update SP map */
+
+ /* Update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
- if (tc_req->use_wfq) {
- /* update WFQ map */
- wfq_tc_map |= (1 << tc);
- /* find minimal weight */
- if (tc_req->weight < min_weight)
- min_weight = tc_req->weight;
- }
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
}
- /* write SP map */
+
+ /* Write SP map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
NIG_REG_TX_ARB_CLIENT_IS_STRICT,
(sp_tc_map << tc_client_offset));
- /* write WFQ map */
+
+ /* Write WFQ map */
ecore_wr(p_hwfn, p_ptt,
is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
@@ -876,22 +1023,23 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
/* write WFQ weights */
for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- if (tc_req->use_wfq) {
- /* translate weight to bytes */
- u32 byte_weight =
- (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
- /* write WFQ weight */
- ecore_wr(p_hwfn, p_ptt,
- tc_weight_base_addr +
- tc_weight_addr_diff * tc_client_offset,
- byte_weight);
- /* write WFQ upper bound */
- ecore_wr(p_hwfn, p_ptt,
- tc_bound_base_addr +
- tc_bound_addr_diff * tc_client_offset,
- NIG_ETS_UP_BOUND(byte_weight, req->mtu));
- }
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
+ tc_weight_addr_diff * tc_client_offset, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
+ tc_bound_addr_diff * tc_client_offset,
+ NIG_ETS_UP_BOUND(byte_weight, req->mtu));
}
}
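/* Weight translation example for the loop above, using NIG_ETS_MIN_WFQ_BYTES
 * = 1600 from this file and hypothetical WFQ weights of 1 and 3 (so
 * min_weight = 1):
 *
 *   byte_weight(weight 1) = (1600 * 1) / 1 = 1600 bytes
 *   byte_weight(weight 3) = (1600 * 3) / 1 = 4800 bytes
 *
 * Each TC's upper bound is then NIG_ETS_UP_BOUND(byte_weight, mtu), i.e.
 * twice the larger of its byte weight and the MTU.
 */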
@@ -899,16 +1047,18 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req)
{
- u8 tc;
u32 ctrl, inc_val, reg_offset;
- /* disable global MAC+LB RL */
+ u8 tc;
+
+ /* Disable global MAC+LB RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
- /* configure and enable global MAC+LB RL */
+
+ /* Configure and enable global MAC+LB RL */
if (req->lb_mac_rate) {
- /* configure */
+ /* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
@@ -916,20 +1066,23 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
+
+ /* Enable */
ctrl |=
1 <<
NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
}
- /* disable global LB-only RL */
+
+ /* Disable global LB-only RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
- /* configure and enable global LB-only RL */
+
+ /* Configure and enable global LB-only RL */
if (req->lb_rate) {
- /* configure */
+ /* Configure */
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
NIG_RL_PERIOD_CLK_25M);
inc_val = NIG_RL_INC_VAL(req->lb_rate);
@@ -937,41 +1090,41 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
inc_val);
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
+
+ /* Enable */
ctrl |=
1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
}
- /* per-TC RLs */
+
+ /* Per-TC RLs */
for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
tc++, reg_offset += 4) {
- /* disable TC RL */
+ /* Disable TC RL */
ctrl =
NIG_RL_BASE_TYPE <<
NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
ecore_wr(p_hwfn, p_ptt,
NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
- /* configure and enable TC RL */
- if (req->tc_rate[tc]) {
- /* configure */
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
- reg_offset, NIG_RL_PERIOD_CLK_25M);
- inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
- reg_offset, inc_val);
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
- reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
- /* enable */
- ctrl |=
- 1 <<
- NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
- ecore_wr(p_hwfn, p_ptt,
- NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset,
- ctrl);
- }
+
+ /* Configure and enable TC RL */
+ if (!req->tc_rate[tc])
+ continue;
+
+ /* Configure */
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
+ reg_offset, NIG_RL_PERIOD_CLK_25M);
+ inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
+ reg_offset, inc_val);
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
+ reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
+
+ /* Enable */
+ ctrl |= 1 <<
+ NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
+ ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
+ reg_offset, ctrl);
}
}
@@ -979,20 +1132,23 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req)
{
- u8 pri, tc;
- u32 pri_tc_mask = 0;
u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
+ u32 pri_tc_mask = 0;
+ u8 pri, tc;
+
for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
- if (req->pri[pri].valid) {
- pri_tc_mask |=
- (req->pri[pri].
- tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS));
- tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
- }
+ if (!req->pri[pri].valid)
+ continue;
+
+ pri_tc_mask |= (req->pri[pri].tc_id <<
+ (pri * NIG_PRIORITY_MAP_TC_BITS));
+ tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
}
- /* write priority -> TC mask */
+
+ /* Write priority -> TC mask */
ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
- /* write TC -> priority mask */
+
+ /* Write TC -> priority mask */
for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
tc_pri_mask[tc]);
@@ -1001,110 +1157,133 @@ void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
}
}
+
/* PRS: ETS configuration constants */
-#define PRS_ETS_MIN_WFQ_BYTES 1600
+#define PRS_ETS_MIN_WFQ_BYTES 1600
#define PRS_ETS_UP_BOUND(weight, mtu) \
-(2 * ((weight) > (mtu) ? (weight) : (mtu)))
+ (2 * ((weight) > (mtu) ? (weight) : (mtu)))
+
+
void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_ets_req *req)
{
+ u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
- u32 min_weight = 0xffffffff;
- u32 tc_weight_addr_diff =
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
- u32 tc_bound_addr_diff =
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
+ tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
+ PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
+ tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
+ PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
+
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- /* update SP map */
+
+ /* Update SP map */
if (tc_req->use_sp)
sp_tc_map |= (1 << tc);
- if (tc_req->use_wfq) {
- /* update WFQ map */
- wfq_tc_map |= (1 << tc);
- /* find minimal weight */
- if (tc_req->weight < min_weight)
- min_weight = tc_req->weight;
- }
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Update WFQ map */
+ wfq_tc_map |= (1 << tc);
+
+ /* Find minimal weight */
+ if (tc_req->weight < min_weight)
+ min_weight = tc_req->weight;
}
+
/* write SP map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
+
/* write WFQ map */
ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
wfq_tc_map);
+
/* write WFQ weights */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
struct init_ets_tc_req *tc_req = &req->tc_req[tc];
- if (tc_req->use_wfq) {
- /* translate weight to bytes */
- u32 byte_weight =
- (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
- min_weight;
- /* write WFQ weight */
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 +
- tc * tc_weight_addr_diff, byte_weight);
- /* write WFQ upper bound */
- ecore_wr(p_hwfn, p_ptt,
- PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
- tc * tc_bound_addr_diff,
- PRS_ETS_UP_BOUND(byte_weight, req->mtu));
- }
+ u32 byte_weight;
+
+ if (!tc_req->use_wfq)
+ continue;
+
+ /* Translate weight to bytes */
+ byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
+ min_weight;
+
+ /* Write WFQ weight */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
+ tc_weight_addr_diff, byte_weight);
+
+ /* Write WFQ upper bound */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
+ tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
+ req->mtu));
}
}
+
/* BRB: RAM configuration constants */
#define BRB_TOTAL_RAM_BLOCKS_BB 4800
#define BRB_TOTAL_RAM_BLOCKS_K2 5632
-#define BRB_BLOCK_SIZE 128 /* in bytes */
+#define BRB_BLOCK_SIZE 128
#define BRB_MIN_BLOCKS_PER_TC 9
-#define BRB_HYST_BYTES 10240
-#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
-/*
- * temporary big RAM allocation - should be updated
- */
+#define BRB_HYST_BYTES 10240
+#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
+
+/* Temporary big RAM allocation - should be updated */
void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
{
- u8 port, active_ports = 0;
+ u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
u32 active_port_blocks, reg_offset = 0;
- u32 tc_headroom_blocks =
- (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE);
- u32 min_pkt_size_blocks =
- (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE);
- u32 total_blocks =
- ECORE_IS_K2(p_hwfn->
- p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
- BRB_TOTAL_RAM_BLOCKS_BB;
- /* find number of active ports */
+ u8 port, active_ports = 0;
+
+ tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
+ BRB_BLOCK_SIZE);
+ min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
+ BRB_BLOCK_SIZE);
+ total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
+ BRB_TOTAL_RAM_BLOCKS_BB;
+
+ /* Find number of active ports */
for (port = 0; port < MAX_NUM_PORTS; port++)
if (req->num_active_tcs[port])
active_ports++;
+
active_port_blocks = (u32)(total_blocks / active_ports);
+
for (port = 0; port < req->max_ports_per_engine; port++) {
- /* calculate per-port sizes */
- u32 tc_guaranteed_blocks =
- (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE);
- u32 port_blocks =
- req->num_active_tcs[port] ? active_port_blocks : 0;
- u32 port_guaranteed_blocks =
- req->num_active_tcs[port] * tc_guaranteed_blocks;
- u32 port_shared_blocks = port_blocks - port_guaranteed_blocks;
- u32 full_xoff_th =
- req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC;
- u32 full_xon_th = full_xoff_th + min_pkt_size_blocks;
- u32 pause_xoff_th = tc_headroom_blocks;
- u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+ u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
+ u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
+ u32 tc_guaranteed_blocks;
u8 tc;
- /* init total size per port */
+
+ /* Calculate per-port sizes */
+ tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
+ BRB_BLOCK_SIZE);
+ port_blocks = req->num_active_tcs[port] ? active_port_blocks :
+ 0;
+ port_guaranteed_blocks = req->num_active_tcs[port] *
+ tc_guaranteed_blocks;
+ port_shared_blocks = port_blocks - port_guaranteed_blocks;
+ full_xoff_th = req->num_active_tcs[port] *
+ BRB_MIN_BLOCKS_PER_TC;
+ full_xon_th = full_xoff_th + min_pkt_size_blocks;
+ pause_xoff_th = tc_headroom_blocks;
+ pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
+
+ /* Init total size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
port_blocks);
- /* init shared size per port */
+
+ /* Init shared size per port */
ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
port_shared_blocks);
+
for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
- /* clear init values for non-active TCs */
+ /* Clear init values for non-active TCs */
if (tc == req->num_active_tcs[port]) {
tc_guaranteed_blocks = 0;
full_xoff_th = 0;
@@ -1112,15 +1291,18 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
pause_xoff_th = 0;
pause_xon_th = 0;
}
- /* init guaranteed size per TC */
+
+ /* Init guaranteed size per TC */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_TC_GUARANTIED_0 + reg_offset,
tc_guaranteed_blocks);
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
BRB_HYST_BLOCKS);
-/* init pause/full thresholds per physical TC - for loopback traffic */
+ /* Init pause/full thresholds per physical TC - for
+ * loopback traffic.
+ */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1133,7 +1315,10 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
ecore_wr(p_hwfn, p_ptt,
BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
reg_offset, pause_xon_th);
-/* init pause/full thresholds per physical TC - for main traffic */
+
+ /* Init pause/full thresholds per physical TC - for
+ * main traffic.
+ */
ecore_wr(p_hwfn, p_ptt,
BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
reg_offset, full_xoff_th);
@@ -1150,23 +1335,25 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
}
}
-/*In MF should be called once per engine to set EtherType of OuterTag*/
+/* In MF should be called once per engine to set EtherType of OuterTag */
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
- /* update PRS register */
+ /* Update PRS register */
STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
- /* update NIG register */
+
+ /* Update NIG register */
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
- /* update PBF register */
+
+ /* Update PBF register */
STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, ethType);
}
-/*In MF should be called once per port to set EtherType of OuterTag*/
+/* In MF should be called once per port to set EtherType of OuterTag */
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType)
{
- /* update DORQ register */
+ /* Update DORQ register */
STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
}
@@ -1176,11 +1363,13 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
- /* update PRS register */
+ /* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
- /* update PBF register */
+
+ /* Update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
}
@@ -1188,23 +1377,26 @@ void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, bool vxlan_enable)
{
u32 reg_val;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
vxlan_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* update DORQ register */
+
+ /* Update DORQ register */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
vxlan_enable ? 1 : 0);
}
@@ -1214,7 +1406,8 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
bool eth_gre_enable, bool ip_gre_enable)
{
u32 reg_val;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
@@ -1224,10 +1417,11 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
@@ -1236,7 +1430,8 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
ip_gre_enable);
ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
- /* update DORQ registers */
+
+ /* Update DORQ registers */
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
eth_gre_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
@@ -1246,14 +1441,13 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 dest_port)
{
- /* geneve tunnel not supported in BB_A0 */
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
- return;
- /* update PRS register */
+ /* Update PRS register */
ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
- /* update PBF register */
+
+ /* Update PBF register */
ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
}
@@ -1262,10 +1456,8 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- /* geneve tunnel not supported in BB_A0 */
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
- return;
- /* update PRS register */
+
+ /* Update PRS register */
reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
@@ -1275,42 +1467,75 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
ip_geneve_enable);
ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
- ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
(u32)PRS_ETH_TUNN_FIC_FORMAT);
}
- /* update NIG register */
+
+ /* Update NIG register */
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
eth_geneve_enable ? 1 : 0);
ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
ip_geneve_enable ? 1 : 0);
- /* comp ver */
- reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
- ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
- ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
- ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
- /* EDPM with geneve tunnel not supported in BB_B0 */
+
+ /* EDPM with geneve tunnel not supported in BB */
if (ECORE_IS_BB_B0(p_hwfn->p_dev))
return;
- /* update DORQ registers */
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+
+ /* Update DORQ registers */
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
eth_geneve_enable ? 1 : 0);
- ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
ip_geneve_enable ? 1 : 0);
}
+
#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
-#define PARSER_ETH_CONN_CM_HDR (0x0)
+#define PARSER_ETH_CONN_CM_HDR 0
#define CAM_LINE_SIZE sizeof(u32)
#define RAM_LINE_SIZE sizeof(u64)
#define REG_SIZE sizeof(u32)
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id)
+{
+ union gft_cam_line_union cam_line;
+ struct gft_ram_line ram_line;
+ u32 i, *ram_line_ptr;
+
+ ram_line_ptr = (u32 *)&ram_line;
+
+ /* Stop using gft logic, disable gft search */
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, 0x0);
+
+ /* Clean RAM & CAM for the next RFS/GFT session */
+
+ /* Zero camline */
+ OSAL_MEMSET(&cam_line, 0, sizeof(cam_line));
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ cam_line.cam_line_mapped.camline);
+
+ /* Zero ramline */
+ OSAL_MEMSET(&ram_line, 0, sizeof(ram_line));
+
+ /* Each iteration write to reg */
+ for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
+ ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
+ RAM_LINE_SIZE * pf_id +
+ i * REG_SIZE, *(ram_line_ptr + i));
+}
+
+
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- /* set RFS event ID to be awakened i Tstorm By Prs */
- u32 rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
+ u32 rfs_cm_hdr_event_id;
+
+ /* Set RFS event ID to be awakened i Tstorm By Prs */
+ rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
@@ -1331,39 +1556,48 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct gft_ram_line ramLine;
u32 *ramLinePointer = (u32 *)&ramLine;
int i;
+
if (!ipv6 && !ipv4)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - ipv4 or ipv6");
+
if (!tcp && !udp)
DP_NOTICE(p_hwfn, true,
"set_rfs_mode_enable: must accept at "
"least on of - udp or tcp");
- /* set RFS event ID to be awakened i Tstorm By Prs */
+
+ /* Set RFS event ID to be awakened in Tstorm by PRS */
rfs_cm_hdr_event_id |= T_ETH_PACKET_MATCH_RFS_EVENTID <<
PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
rfs_cm_hdr_event_id |= PARSER_ETH_CONN_CM_HDR <<
PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
+
/* Configure Registers for RFS mode */
-/* enable gft search */
+
+ /* Enable gft search */
ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); /* do not load
* context only cid
* in PRS on match
*/
camLine.cam_line_mapped.camline = 0;
- /* cam line is now valid!! */
+
+ /* Cam line is now valid!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_VALID, 1);
- /* filters are per PF!! */
+
+ /* Filters are per PF!! */
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID_MASK, 1);
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
if (!(tcp && udp)) {
SET_FIELD(camLine.cam_line_mapped.camline,
- GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, 1);
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
if (tcp)
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
@@ -1373,6 +1607,7 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
GFT_PROFILE_UDP_PROTOCOL);
}
+
if (!(ipv4 && ipv6)) {
SET_FIELD(camLine.cam_line_mapped.camline,
GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
@@ -1385,44 +1620,53 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
GFT_CAM_LINE_MAPPED_IP_VERSION,
GFT_PROFILE_IPV6);
}
- /* write characteristics to cam */
+
+ /* Write characteristics to cam */
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
camLine.cam_line_mapped.camline);
camLine.cam_line_mapped.camline =
ecore_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
- /* write line to RAM - compare to filter 4 tuple */
- ramLine.low32bits = 0;
- ramLine.high32bits = 0;
- SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_DST_IP, 1);
- SET_FIELD(ramLine.high32bits, GFT_RAM_LINE_SRC_IP, 1);
- SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_SRC_PORT, 1);
- SET_FIELD(ramLine.low32bits, GFT_RAM_LINE_DST_PORT, 1);
- /* each iteration write to reg */
+
+ /* Write line to RAM - compare to filter 4 tuple */
+ ramLine.lo = 0;
+ ramLine.hi = 0;
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(ramLine.hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(ramLine.lo, GFT_RAM_LINE_DST_PORT, 1);
+
+ /* Each iteration write to reg */
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * pf_id +
i * REG_SIZE, *(ramLinePointer + i));
- /* set default profile so that no filter match will happen */
- ramLine.low32bits = 0xffff;
- ramLine.high32bits = 0xffff;
+
+ /* Set default profile so that no filter match will happen */
+ ramLine.lo = 0xffff;
+ ramLine.hi = 0xffff;
for (i = 0; i < RAM_LINE_SIZE / REG_SIZE; i++)
ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
RAM_LINE_SIZE * PRS_GFT_CAM_LINES_NO_MATCH +
i * REG_SIZE, *(ramLinePointer + i));
}
-/* Configure VF zone size mode*/
+/* Configure VF zone size mode */
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u16 mode,
bool runtime_init)
{
u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
u32 msdm_vf_offset_mask;
+
if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
msdm_vf_size_log += 1;
else if (mode == VF_ZONE_SIZE_MODE_QUAD)
msdm_vf_size_log += 2;
+
msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
+
if (runtime_init) {
STORE_RT_REG(p_hwfn,
PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
@@ -1438,12 +1682,13 @@ void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
}
}
-/* get mstorm statistics for offset by VF zone size mode*/
+/* Get mstorm statistics for offset by VF zone size mode */
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
+
if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
(stat_cnt_id > MAX_NUM_PFS)) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
@@ -1453,16 +1698,18 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
(stat_cnt_id - MAX_NUM_PFS);
}
+
return offset;
}
-/* get mstorm VF producer offset by VF zone size mode*/
+/* Get mstorm VF producer offset by VF zone size mode */
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
u8 vf_id,
u8 vf_queue_id,
u16 vf_zone_size_mode)
{
u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
+
if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
@@ -1471,5 +1718,166 @@ u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
vf_id;
}
+
return offset;
}
+
+/* Calculate CRC8 of first 4 bytes in buf */
+static u8 ecore_calc_crc8(const u8 *buf)
+{
+ u32 i, j, crc = 0xff << 8;
+
+ /* CRC-8 polynomial */
+ #define POLY 0x1070
+
+ for (j = 0; j < 4; j++, buf++) {
+ crc ^= (*buf << 8);
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x8000)
+ crc ^= (POLY << 3);
+
+ crc <<= 1;
+ }
+ }
+
+ return (u8)(crc >> 8);
+}
+
+/* Calculate and return CDU validation byte per connection type / region /
+ * cid
+ */
+static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region,
+ u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ u32 validation_string = 0;
+ const u8 *data_to_crc_rev;
+ u8 data_to_crc[4];
+
+ data_to_crc_rev = (const u8 *)&validation_string;
+
+ /*
+ * The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian (ntoh()) */
+ data_to_crc[0] = data_to_crc_rev[3];
+ data_to_crc[1] = data_to_crc_rev[2];
+ data_to_crc[2] = data_to_crc_rev[1];
+ data_to_crc[3] = data_to_crc_rev[0];
+
+ crc = ecore_calc_crc8(data_to_crc);
+
+ validation_byte |= ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
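/* Worked example of the string-to-compress layout documented above, for a
 * hypothetical cid = 0x12345678, region = 3, conn_type = 1 with all three
 * CDU_CONTEXT_VALIDATION_CFG_USE_* bits set:
 *
 *   cid & 0xFFF00000      = 0x12300000   (CID[31:20] in bits [31:20])
 *   (cid & 0xFFF) << 8    = 0x00067800   (CID[11:0] in bits [19:8])
 *   (region & 0xF) << 4   = 0x00000030   (bits [7:4])
 *   conn_type & 0xF       = 0x00000001   (bits [3:0])
 *   validation_string     = 0x12367831
 *
 * The string is then byte-swapped and fed to ecore_calc_crc8().
 */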
+
+/* Calculate and set validation bytes for session context */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void ecore_memset_task_ctx(void *p_ctx_mem, const u32 ctx_size,
+ const u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 *)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ OSAL_MEMSET(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3 - bits [31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1 - bits [15: 8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+}
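+
+/* Typical usage of the validation helpers above (a sketch, using the same
+ * function names):
+ *
+ *   ecore_enable_context_validation(p_hwfn, p_ptt);
+ *   ...
+ *   // when a session context is prepared for a given cid:
+ *   ecore_calc_session_ctx_validation(p_ctx_mem, ctx_size, ctx_type, cid);
+ *   ...
+ *   // later, to clear the context without losing the validation bytes:
+ *   ecore_memset_session_ctx(p_ctx_mem, ctx_size, ctx_type);
+ *
+ * Task contexts follow the same pattern with the _task_ctx variants.
+ */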
diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h
index 9df0e7de..4da3fc29 100644
--- a/drivers/net/qede/base/ecore_init_fw_funcs.h
+++ b/drivers/net/qede/base/ecore_init_fw_funcs.h
@@ -8,20 +8,22 @@
#ifndef _INIT_FW_FUNCS_H
#define _INIT_FW_FUNCS_H
-/* forward declarations */
+/* Forward declarations */
+
struct init_qm_pq_params;
+
/**
- * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes
+ * @brief ecore_qm_pf_mem_size - Prepare QM ILT sizes
*
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
@@ -31,6 +33,7 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
u32 num_tids,
u16 num_pf_pqs,
u16 num_vf_pqs);
+
/**
* @brief ecore_qm_common_rt_init - Prepare QM runtime init values for engine
* phase
@@ -38,10 +41,10 @@ u32 ecore_qm_pf_mem_size(u8 pf_id,
* @param p_hwfn
* @param max_ports_per_engine - max number of ports per engine in HW
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
+ * @param pf_rl_en - enable per-PF rate limiters
+ * @param pf_wfq_en - enable per-PF WFQ
+ * @param vport_rl_en - enable per-VPORT rate limiters
+ * @param vport_wfq_en - enable per-VPORT WFQ
* @param port_params - array of size MAX_NUM_PORTS with params for each port
*
* @return 0 on success, -1 on error.
@@ -54,22 +57,24 @@ int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
bool vport_rl_en,
bool vport_wfq_en,
struct init_qm_port_params port_params[MAX_NUM_PORTS]);
+
/**
* @brief ecore_qm_pf_rt_init Prepare QM runtime init values for the PF phase
*
* @param p_hwfn
* @param p_ptt - ptt window used for writing the registers
- * @param port_id - port ID
- * @param pf_id - PF ID
+ * @param port_id - port ID
+ * @param pf_id - PF ID
* @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param is_first_pf - 1 = first PF in engine, 0 = othwerwise
- * @param num_pf_cids - number of connections used by this PF
+ * @param is_first_pf - 1 = first PF in engine, 0 = otherwise
+ * @param num_pf_cids - number of connections used by this PF
* @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param start_pq - first Tx PQ ID associated with this PF
- * @param num_pf_pqs - number of Tx PQs associated with this PF (non-VF)
- * @param num_vf_pqs - number of Tx PQs associated with a VF
- * @param start_vport - first VPORT ID associated with this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param start_pq - first Tx PQ ID associated with this PF
+ * @param num_pf_pqs - number of Tx PQs associated with this PF
+ * (non-VF)
+ * @param num_vf_pqs - number of Tx PQs associated with a VF
+ * @param start_vport - first VPORT ID associated with this PF
* @param num_vports - number of VPORTs associated with this PF
* @param pf_wfq - WFQ weight. if PF WFQ is globally disabled, the weight must
* be 0. otherwise, the weight must be non-zero.
@@ -100,6 +105,7 @@ int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
u32 pf_rl,
struct init_qm_pq_params *pq_params,
struct init_qm_vport_params *vport_params);
+
/**
* @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF
*
@@ -114,11 +120,12 @@ int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u16 pf_wfq);
+
/**
- * @brief ecore_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief ecore_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt - ptt window used for writing the registers
* @param pf_id - PF ID
* @param pf_rl - rate limit in Mb/sec units
*
@@ -128,6 +135,7 @@ int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 pf_id,
u32 pf_rl);
+
/**
* @brief ecore_init_vport_wfq Initializes the WFQ weight of specified VPORT
*
@@ -144,10 +152,12 @@ int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS],
u16 vport_wfq);
+
/**
- * @brief ecore_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief ecore_init_vport_rl - Initializes the rate limit of the specified
+ * VPORT.
*
- * @param p_hwfn
+ * @param p_hwfn - HW device data
* @param p_ptt - ptt window used for writing the registers
* @param vport_id - VPORT ID
* @param vport_rl - rate limit in Mb/sec units
@@ -158,6 +168,7 @@ int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 vport_id,
u32 vport_rl);
+
/**
* @brief ecore_send_qm_stop_cmd Sends a stop command to the QM
*
@@ -178,6 +189,7 @@ bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_init_nig_ets - initializes the NIG ETS arbiter
*
@@ -193,6 +205,7 @@ void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req,
bool is_lb);
+
/**
* @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs
*
@@ -205,6 +218,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_lb_rl_req *req);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map.
*
@@ -216,6 +230,7 @@ void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_nig_pri_tc_map_req *req);
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter
@@ -229,6 +244,7 @@ void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_ets_req *req);
#endif /* UNUSED_HSI_FUNC */
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC
@@ -242,6 +258,7 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct init_brb_ram_req *req);
#endif /* UNUSED_HSI_FUNC */
+
#ifndef UNUSED_HSI_FUNC
/**
* @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf and llh
@@ -250,22 +267,24 @@ void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
* if engine
* is in BD mode.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType);
+
/**
* @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs to
* input ethType should Be called
* once per port.
*
- * @param p_ptt - ptt window used for writing the registers.
+ * @param p_ptt - ptt window used for writing the registers.
* @param ethType - etherType to configure
*/
void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt, u32 ethType);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_set_vxlan_dest_port - initializes vxlan tunnel destination udp
* port
@@ -276,15 +295,17 @@ void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn,
void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
+
/**
* @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
*/
void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool vxlan_enable);
+
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -296,6 +317,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
bool eth_gre_enable,
bool ip_gre_enable);
+
/**
* @brief ecore_set_geneve_dest_port - initializes geneve tunnel destination
* udp port
@@ -306,6 +328,7 @@ void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 dest_port);
+
/**
* @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW
*
@@ -318,6 +341,7 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
bool eth_geneve_enable,
bool ip_geneve_enable);
#ifndef UNUSED_HSI_FUNC
+
/**
* @brief ecore_set_gft_event_id_cm_hdr - configure GFT event id and cm header
*
@@ -325,16 +349,27 @@ void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+
+/**
+ * @brief ecore_set_rfs_mode_disable - Disable and configure HW for RFS
+ *
+ * @param p_hwfn - HW device data
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param pf_id - pf on which to disable RFS.
+ */
+void ecore_set_rfs_mode_disable(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 pf_id);
+
/**
* @brief ecore_set_rfs_mode_enable - enable and configure HW for RFS
*
-*
-* @param p_ptt - ptt window used for writing the registers.
-* @param pf_id - pf on which to enable RFS.
-* @param tcp - set profile tcp packets.
-* @param udp - set profile udp packet.
-* @param ipv4 - set profile ipv4 packet.
-* @param ipv6 - set profile ipv6 packet.
+* @param p_ptt - ptt window used for writing the registers.
+* @param pf_id - pf on which to enable RFS.
+* @param tcp - set profile tcp packets.
+* @param udp - set profile udp packet.
+* @param ipv4 - set profile ipv4 packet.
+* @param ipv6 - set profile ipv6 packet.
*/
void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
@@ -344,6 +379,7 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
bool ipv4,
bool ipv6);
#endif /* UNUSED_HSI_FUNC */
+
/**
* @brief ecore_config_vf_zone_size_mode - Configure VF zone size mode. Must be
* used before first ETH queue started.
@@ -357,18 +393,20 @@ void ecore_set_rfs_mode_enable(struct ecore_hwfn *p_hwfn,
*/
void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn, struct ecore_ptt
*p_ptt, u16 mode, bool runtime_init);
+
/**
-* @brief ecore_get_mstorm_queue_stat_offset - get mstorm statistics offset by VF
-* zone size mode.
+ * @brief ecore_get_mstorm_queue_stat_offset - Get mstorm statistics offset by
+ * VF zone size mode.
*
* @param stat_cnt_id - statistic counter id
* @param vf_zone_size_mode - VF zone size mode. Use enum vf_zone_size_mode.
*/
u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
u16 stat_cnt_id, u16 vf_zone_size_mode);
+
/**
-* @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
-* size mode.
+ * @brief ecore_get_mstorm_eth_vf_prods_offset - VF producer offset by VF zone
+ * size mode.
*
* @param vf_id - vf id.
* @param vf_queue_id - per VF rx queue id.
@@ -376,4 +414,58 @@ u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
*/
u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn, u8 vf_id, u8
vf_queue_id, u16 vf_zone_size_mode);
+/**
+ * @brief ecore_enable_context_validation - Enable and configure context
+ * validation.
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ */
+void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief ecore_calc_session_ctx_validation - Calculate validation byte for
+ * session context.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param cid - context cid.
+ */
+void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 cid);
+/**
+ * @brief ecore_calc_task_ctx_validation - Calculate validation byte for task
+ * context.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - context size.
+ * @param ctx_type - context type.
+ * @param tid - context tid.
+ */
+void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size,
+ u8 ctx_type, u32 tid);
+/**
+ * @brief ecore_memset_session_ctx - Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
+/**
+ * @brief ecore_memset_task_ctx - Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ *
+ * @param p_ctx_mem - pointer to context memory.
+ * @param ctx_size - size to initialize.
+ * @param ctx_type - context type.
+ */
+void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size,
+ u8 ctx_type);
#endif
diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c
index 351e9467..b907a95e 100644
--- a/drivers/net/qede/base/ecore_init_ops.c
+++ b/drivers/net/qede/base/ecore_init_ops.c
@@ -63,8 +63,8 @@ static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
- enum _ecore_status_t rc = ECORE_SUCCESS;
u16 i, segment;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
/* Since not all RT entries are initialized, go over the RT and
* for each segment of initialized values use DMA.
@@ -190,19 +190,19 @@ static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
bool b_must_dmae,
bool b_can_dmae)
{
+ u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
+ u32 data = OSAL_LE32_TO_CPU(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
u32 offset, output_len, input_len, max_size;
#endif
- u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
struct ecore_dev *p_dev = p_hwfn->p_dev;
- enum _ecore_status_t rc = ECORE_SUCCESS;
union init_array_hdr *hdr;
const u32 *array_data;
- u32 size, addr, data;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u32 size;
array_data = p_dev->fw_data->arr_data;
- data = OSAL_LE32_TO_CPU(cmd->data);
- addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
hdr = (union init_array_hdr *)
(uintptr_t)(array_data + dmae_array_offset);
@@ -272,13 +272,10 @@ static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
struct init_write_op *p_cmd,
bool b_can_dmae)
{
+ u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
enum _ecore_status_t rc = ECORE_SUCCESS;
- bool b_must_dmae;
- u32 addr, data;
-
- data = OSAL_LE32_TO_CPU(p_cmd->data);
- b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
- addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
/* Sanitize */
if (b_must_dmae && !b_can_dmae) {
@@ -452,10 +449,10 @@ enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
int phase, int phase_id, int modes)
{
struct ecore_dev *p_dev = p_hwfn->p_dev;
- enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
num_init_ops = p_dev->fw_data->init_ops_size;
init_ops = p_dev->fw_data->init_ops;
@@ -573,8 +570,7 @@ enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
return ECORE_INVAL;
}
- /* First Dword contains metadata and should be skipped */
- buf_hdr = (struct bin_buffer_hdr *)((uintptr_t)(data + sizeof(u32)));
+ buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data;
offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset));
diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c
index 6fb037df..8dc4d150 100644
--- a/drivers/net/qede/base/ecore_int.c
+++ b/drivers/net/qede/base/ecore_int.c
@@ -59,6 +59,11 @@ struct aeu_invert_reg_bit {
#define ATTENTION_OFFSET_MASK (0x000ff000)
#define ATTENTION_OFFSET_SHIFT (12)
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) ((value) << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT (1 << 23)
+
#define ATTENTION_CLEAR_ENABLE (1 << 28)
unsigned int flags;
@@ -414,7 +419,7 @@ ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
-#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f0000)
#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
@@ -468,7 +473,26 @@ static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
return ECORE_INVAL;
}
-/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
+/* Instead of major changes to the data-structure, we have some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
+};
+
+/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
{
{ /* After Invert 1 */
@@ -511,8 +535,18 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
OSAL_NULL, MAX_BLOCK_ID},
{"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
ecore_general_attention_35, MAX_BLOCK_ID},
- {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
- BLOCK_CNIG},
+ {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ OSAL_NULL, BLOCK_NWS},
+ {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ OSAL_NULL, BLOCK_NWM},
+ {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ OSAL_NULL, BLOCK_NWM},
{"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
{"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
{"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
@@ -634,6 +668,27 @@ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
};
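+
+/* A few AEU sources carry a different meaning on BB adapters than the
+ * aeu_descs[] entries above describe; such bits are flagged with
+ * ATTENTION_BB_DIFFERENT, and on BB devices the helper below redirects
+ * them to the matching aeu_descs_special[] entry.
+ */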
+static struct aeu_invert_reg_bit *
+ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!ECORE_IS_BB(p_hwfn->p_dev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
+
#define ATTN_STATE_BITS (0xfff)
#define ATTN_BITS_MASKABLE (0x3ff)
struct ecore_sb_attn_info {
@@ -868,7 +923,7 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
- if ((p_bit->flags & ATTENTION_PARITY) &&
+ if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
!!(parities & (1 << bit_idx))) {
ecore_int_deassertion_parity(p_hwfn, p_bit,
bit_idx);
@@ -905,26 +960,29 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
unsigned long int bitmask;
u8 bit, bit_len;
+ /* Need to account for bits with changed meaning */
p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
- /* No need to handle attention-only bits */
- if (p_aeu->flags == ATTENTION_PAR)
- continue;
-
bit = bit_idx;
bit_len = ATTENTION_LENGTH(p_aeu->flags);
- if (p_aeu->flags & ATTENTION_PAR_INT) {
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
/* Skip Parity */
bit++;
bit_len--;
}
+ /* Find the bits relating to HW-block, then
+ * shift so they'll become LSB.
+ */
bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
if (bitmask) {
u32 flags = p_aeu->flags;
char bit_name[30];
+ u8 num;
- bit = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
+ num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
bit_len);
/* Some bits represent more than a
@@ -936,11 +994,17 @@ static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
ATTENTION_LENGTH(flags) > 1))
OSAL_SNPRINTF(bit_name, 30,
p_aeu->bit_name,
- bit);
+ num);
else
OSAL_STRNCPY(bit_name,
p_aeu->bit_name,
30);
+
+ /* We now need to pass bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
/* Handle source of the attention */
ecore_int_deassertion_aeu_bit(p_hwfn,
p_aeu,
@@ -1203,12 +1267,13 @@ static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
for (i = 0; i < NUM_ATTN_REGS; i++) {
/* j is array index, k is bit index */
for (j = 0, k = 0; k < 32; j++) {
- unsigned int flags = aeu_descs[i].bits[j].flags;
+ struct aeu_invert_reg_bit *p_aeu;
- if (flags & ATTENTION_PARITY)
+ p_aeu = &aeu_descs[i].bits[j];
+ if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
sb_info->parity_mask[i] |= 1 << k;
- k += ATTENTION_LENGTH(flags);
+ k += ATTENTION_LENGTH(p_aeu->flags);
}
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
"Attn Mask [Reg %d]: 0x%08x\n",
@@ -1234,7 +1299,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
if (!p_sb) {
DP_NOTICE(p_dev, true,
- "Failed to allocate `struct ecore_sb_attn_info'");
+ "Failed to allocate `struct ecore_sb_attn_info'\n");
return ECORE_NOMEM;
}
@@ -1243,7 +1308,7 @@ static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
SB_ATTN_ALIGNED_SIZE(p_hwfn));
if (!p_virt) {
DP_NOTICE(p_dev, true,
- "Failed to allocate status block (attentions)");
+ "Failed to allocate status block (attentions)\n");
OSAL_FREE(p_dev, p_sb);
return ECORE_NOMEM;
}
@@ -1964,6 +2029,31 @@ enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
}
}
}
+
+ /* There's a possibility the igu_sb_cnt_iov doesn't properly reflect
+ * the number of VF SBs [especially for first VF on engine, as we can't
+ * differentiate between empty entries and its entries].
+ * Since we don't really support more SBs than VFs today, prevent any
+ * such configuration by sanitizing the number of SBs to equal the
+ * number of VFs.
+ */
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 total_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
+
+ if (total_vfs < p_igu_info->free_blks) {
+ DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
+ "Limiting number of SBs for IOV - %04x --> %04x\n",
+ p_igu_info->free_blks,
+ p_hwfn->p_dev->p_iov_info->total_vfs);
+ p_igu_info->free_blks = total_vfs;
+ } else if (total_vfs > p_igu_info->free_blks) {
+ DP_NOTICE(p_hwfn, true,
+ "IGU has only %04x SBs for VFs while the device has %04x VFs\n",
+ p_igu_info->free_blks, total_vfs);
+ return ECORE_INVAL;
+ }
+ }
+
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
@@ -2092,24 +2182,6 @@ void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
-{
- struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
-
- /* Determine origin of SB id */
- if ((sb_id >= p_info->igu_base_sb) &&
- (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
- return sb_id - p_info->igu_base_sb;
- } else if ((sb_id >= p_info->igu_base_sb_iov) &&
- (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
- return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
- } else {
- DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
- sb_id);
- return 0;
- }
-}
-
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
int i;
@@ -2127,8 +2199,8 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u8 timer_res, u16 sb_id, bool tx)
{
- enum _ecore_status_t rc;
struct cau_sb_entry sb_entry;
+ enum _ecore_status_t rc;
if (!p_hwfn->hw_init_done) {
DP_ERR(p_hwfn, "hardware not initialized yet\n");
@@ -2159,3 +2231,30 @@ enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
return rc;
}
+
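+/* Collect debug information for a status block: read the IGU producer and
+ * consumer values and the per-index CAU PI entries straight from the
+ * corresponding register/memory arrays.
+ */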
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ int i;
+
+ if (IS_VF(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
+ return ECORE_INVAL;
+
+ p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
+ IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY +
+ sbid * 4 * PIS_PER_SB + i * 4);
+
+ return ECORE_SUCCESS;
+}
diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h
index 45358b94..0c8929e3 100644
--- a/drivers/net/qede/base/ecore_int.h
+++ b/drivers/net/qede/base/ecore_int.h
@@ -172,16 +172,6 @@ void ecore_int_free(struct ecore_hwfn *p_hwfn);
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt);
/**
- * @brief - Returns an Rx queue index appropriate for usage with given SB.
- *
- * @param p_hwfn
- * @param sb_id - absolute index of SB
- *
- * @return index of Rx queue
- */
-u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id);
-
-/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h
index fc873e77..799fbe82 100644
--- a/drivers/net/qede/base/ecore_int_api.h
+++ b/drivers/net/qede/base/ecore_int_api.h
@@ -41,6 +41,12 @@ struct ecore_sb_info {
struct ecore_dev *p_dev;
};
+struct ecore_sb_info_dbg {
+ u32 igu_prod;
+ u32 igu_cons;
+ u16 pi[PIS_PER_SB];
+};
+
struct ecore_sb_cnt_info {
int sb_cnt;
int sb_iov_cnt;
@@ -108,7 +114,7 @@ static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#else
-static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
+static OSAL_INLINE void __internal_ram_wr(__rte_unused void *p_hwfn,
void OSAL_IOMEM *addr,
int size, u32 *data)
#endif
@@ -120,19 +126,37 @@ static OSAL_INLINE void __internal_ram_wr(void *p_hwfn,
}
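+
+/* The _relaxed variant below mirrors __internal_ram_wr() but uses
+ * DIRECT_REG_WR_RELAXED, which presumably performs the register write
+ * without the ordering barrier of the plain DIRECT_REG_WR; see the OSAL
+ * definition for the exact semantics.
+ */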
#ifdef ECORE_CONFIG_DIRECT_HWFN
+static OSAL_INLINE void __internal_ram_wr_relaxed(struct ecore_hwfn *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#else
+static OSAL_INLINE void __internal_ram_wr_relaxed(__rte_unused void *p_hwfn,
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
+#endif
+{
+ unsigned int i;
+
+ for (i = 0; i < size / sizeof(*data); i++)
+ DIRECT_REG_WR_RELAXED(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i],
+ data[i]);
+}
+
+#ifdef ECORE_CONFIG_DIRECT_HWFN
static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn,
- void OSAL_IOMEM *addr,
- int size, u32 *data)
+ void OSAL_IOMEM * addr,
+ int size, u32 *data)
{
- __internal_ram_wr(p_hwfn, addr, size, data);
+ __internal_ram_wr_relaxed(p_hwfn, addr, size, data);
}
#else
static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr,
- int size, u32 *data)
+ int size, u32 *data)
{
- __internal_ram_wr(OSAL_NULL, addr, size, data);
+ __internal_ram_wr_relaxed(OSAL_NULL, addr, size, data);
}
#endif
+
#endif
struct ecore_hwfn;
@@ -285,4 +309,19 @@ void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
*/
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable);
+/**
+ * @brief Read debug information regarding a given SB.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_sb - pointer to the status block for which to get info.
+ * @param p_info - pointer to struct to fill with information regarding SB.
+ *
+ * @return ECORE_SUCCESS if pointer is filled; failure otherwise.
+ */
+enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_sb_info *p_sb,
+ struct ecore_sb_info_dbg *p_info);
+
#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
index bb8df82f..50cb3f2b 100644
--- a/drivers/net/qede/base/ecore_iov_api.h
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -52,6 +52,7 @@ enum ecore_iov_pf_to_vf_status {
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
};
struct ecore_mcp_link_params;
@@ -87,6 +88,28 @@ struct ecore_public_vf_info {
u16 forced_vlan;
};
+struct ecore_iov_vf_init_params {
+ u16 rel_vf_id;
+
+ /* Number of requested Queues; Currently, don't support different
+ * number of Rx/Tx queues.
+ */
+ /* TODO - remove this limitation */
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[ECORE_MAX_VF_CHAINS_PER_PF];
+
+ u8 vport_id;
+
+ /* Should be set in case RSS is going to be used for VF */
+ u8 rss_eng_id;
+};
+
#ifdef CONFIG_ECORE_SW_CHANNEL
/* This is SW channel related only... */
enum mbx_state {
@@ -174,15 +197,14 @@ void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
*
* @param p_hwfn
* @param p_ptt
- * @param rel_vf_id
- * @param num_rx_queues
+ * @param p_params
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u16 rel_vf_id,
- u16 num_rx_queues);
+ struct ecore_iov_vf_init_params
+ *p_params);
/**
* @brief ecore_iov_process_mbx_req - process a request received
@@ -301,12 +323,13 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param rel_vf_id - Relative VF ID
* @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
*
* @return bool - true for valid VF ID
*/
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn,
int rel_vf_id,
- bool b_enabled_only);
+ bool b_enabled_only, bool b_non_malicious);
/**
* @brief Get VF's public info structure
@@ -399,16 +422,6 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
u16 *opaque_fid);
/**
- * @brief Get VFs VPORT id.
- *
- * @param p_hwfn
- * @param vfid
- * @param vport id
- */
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vport_id);
-
-/**
* @brief Set forced VLAN [pvid] in PFs copy of bulletin board
* and configures FW/HW to support the configuration.
* Setting of pvid 0 would clear the feature.
@@ -662,24 +675,24 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn,
u16 rel_vf_id);
/**
- * @brief - Get VF's vport min rate configured.
+ * @brief - Return true if the VF has started in FW
+ *
* @param p_hwfn
* @param rel_vf_id
*
- * @return - rate in Mbps
+ * @return
*/
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id);
/**
- * @brief - Configure min rate for VF's vport.
- * @param p_dev
- * @param vfid
- * @param - rate in Mbps
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
*
- * @return
+ * @return - rate in Mbps
*/
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
- int vfid, u32 rate);
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
#endif
/**
@@ -688,15 +701,17 @@ enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
* @param p_hwfn
* @param rel_vf_id
*
- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * @return E4_MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port);
#endif /* CONFIG_ECORE_SRIOV */
#define ecore_for_each_vf(_p_hwfn, _i) \
for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0); \
- _i < MAX_NUM_VFS; \
+ _i < E4_MAX_NUM_VFS; \
_i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h
index aad90123..b4bfe89f 100644
--- a/drivers/net/qede/base/ecore_iro.h
+++ b/drivers/net/qede/base/ecore_iro.h
@@ -185,5 +185,13 @@
#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) (IRO[46].base + \
((rdma_stat_counter_id) * IRO[46].m1))
#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[46].size)
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) (IRO[47].base + \
+ ((pf_id) * IRO[47].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[47].size)
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) (IRO[48].base + \
+ ((roce_pf_id) * IRO[48].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[48].size)
#endif /* __IRO_H__ */
diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h
index 43e01e47..6764bfa6 100644
--- a/drivers/net/qede/base/ecore_iro_values.h
+++ b/drivers/net/qede/base/ecore_iro_values.h
@@ -9,13 +9,13 @@
#ifndef __IRO_VALUES_H__
#define __IRO_VALUES_H__
-static const struct iro iro_arr[47] = {
+static const struct iro iro_arr[49] = {
/* YSTORM_FLOW_CONTROL_MODE_OFFSET */
{ 0x0, 0x0, 0x0, 0x0, 0x8},
/* TSTORM_PORT_STAT_OFFSET(port_id) */
- { 0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ { 0x4cb0, 0x80, 0x0, 0x0, 0x80},
/* TSTORM_LL2_PORT_STAT_OFFSET(port_id) */
- { 0x6318, 0x20, 0x0, 0x0, 0x20},
+ { 0x6518, 0x20, 0x0, 0x0, 0x20},
/* USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) */
{ 0xb00, 0x8, 0x0, 0x0, 0x4},
/* USTORM_FLR_FINAL_ACK_OFFSET(pf_id) */
@@ -41,7 +41,7 @@ static const struct iro iro_arr[47] = {
/* TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) */
{ 0xa28, 0x8, 0x0, 0x0, 0x8},
/* CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
- { 0x60f8, 0x10, 0x0, 0x0, 0x10},
+ { 0x61f8, 0x10, 0x0, 0x0, 0x10},
/* CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) */
{ 0xb820, 0x30, 0x0, 0x0, 0x30},
/* CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) */
@@ -53,7 +53,7 @@ static const struct iro iro_arr[47] = {
/* MSTORM_ETH_VF_PRODS_OFFSET(vf_id,vf_queue_id) */
{ 0x53a0, 0x80, 0x4, 0x0, 0x4},
/* MSTORM_TPA_TIMEOUT_US_OFFSET */
- { 0xc8f0, 0x0, 0x0, 0x0, 0x4},
+ { 0xc7c8, 0x0, 0x0, 0x0, 0x4},
/* MSTORM_ETH_PF_STAT_OFFSET(pf_id) */
{ 0x4ba0, 0x80, 0x0, 0x0, 0x20},
/* USTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
@@ -63,13 +63,13 @@ static const struct iro iro_arr[47] = {
/* PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) */
{ 0x2b48, 0x80, 0x0, 0x0, 0x38},
/* PSTORM_ETH_PF_STAT_OFFSET(pf_id) */
- { 0xf188, 0x78, 0x0, 0x0, 0x78},
+ { 0xf1b0, 0x78, 0x0, 0x0, 0x78},
/* PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethType_id) */
{ 0x1f8, 0x4, 0x0, 0x0, 0x4},
/* TSTORM_ETH_PRS_INPUT_OFFSET */
- { 0xacf0, 0x0, 0x0, 0x0, 0xf0},
+ { 0xaef8, 0x0, 0x0, 0x0, 0xf0},
/* ETH_RX_RATE_LIMIT_OFFSET(pf_id) */
- { 0xade0, 0x8, 0x0, 0x0, 0x8},
+ { 0xafe8, 0x8, 0x0, 0x0, 0x8},
/* XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) */
{ 0x1f8, 0x8, 0x0, 0x0, 0x8},
/* YSTORM_TOE_CQ_PROD_OFFSET(rss_id) */
@@ -85,25 +85,29 @@ static const struct iro iro_arr[47] = {
/* MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id,bdq_id) */
{ 0xb78, 0x10, 0x8, 0x0, 0x2},
/* TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0xd888, 0x38, 0x0, 0x0, 0x24},
+ { 0xd9a8, 0x38, 0x0, 0x0, 0x24},
/* MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
- { 0x12c38, 0x10, 0x0, 0x0, 0x8},
+ { 0x12988, 0x10, 0x0, 0x0, 0x8},
/* USTORM_ISCSI_RX_STATS_OFFSET(pf_id) */
{ 0x11aa0, 0x38, 0x0, 0x0, 0x18},
/* XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0xa8c0, 0x30, 0x0, 0x0, 0x10},
+ { 0xa8c0, 0x38, 0x0, 0x0, 0x10},
/* YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
- { 0x86f8, 0x28, 0x0, 0x0, 0x18},
+ { 0x86f8, 0x30, 0x0, 0x0, 0x18},
/* PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) */
{ 0x101f8, 0x10, 0x0, 0x0, 0x10},
/* TSTORM_FCOE_RX_STATS_OFFSET(pf_id) */
- { 0xdd08, 0x48, 0x0, 0x0, 0x38},
+ { 0xde28, 0x48, 0x0, 0x0, 0x38},
/* PSTORM_FCOE_TX_STATS_OFFSET(pf_id) */
{ 0x10660, 0x20, 0x0, 0x0, 0x20},
/* PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
{ 0x2b80, 0x80, 0x0, 0x0, 0x10},
/* TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) */
- { 0x5000, 0x10, 0x0, 0x0, 0x10},
+ { 0x5020, 0x10, 0x0, 0x0, 0x10},
+/* XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) */
+ { 0xc9b0, 0x30, 0x0, 0x0, 0x10},
+/* TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) */
+ { 0xeec0, 0x10, 0x0, 0x0, 0x10},
};
#endif /* __IRO_VALUES_H__ */
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index 74f61b00..4ab8fd5f 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -29,6 +29,306 @@
#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
+struct ecore_l2_info {
+ u32 queues;
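+
+ /* Per queue-zone bitmaps (MAX_QUEUES_PER_QZONE bits each), tracking
+ * which qid slots within each zone are currently in use.
+ */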
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ osal_mutex_t lock;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
+{
+ struct ecore_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return ECORE_SUCCESS;
+
+ p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
+ if (!p_l2_info)
+ return ECORE_NOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->p_dev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ ecore_vf_get_num_rxqs(p_hwfn, &rx);
+ ecore_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
+ }
+
+ pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
+ sizeof(unsigned long *) *
+ p_l2_info->queues);
+ if (pp_qids == OSAL_NULL)
+ return ECORE_NOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
+ MAX_QUEUES_PER_QZONE / 8);
+ if (pp_qids[i] == OSAL_NULL)
+ return ECORE_NOMEM;
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
+#endif
+
+ return ECORE_SUCCESS;
+}
+
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
+{
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_l2_free(struct ecore_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ if (p_hwfn->p_l2_info == OSAL_NULL)
+ return;
+
+ if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
+ goto out_l2_info;
+
+ /* Free until hit first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
+ break;
+ OSAL_VFREE(p_hwfn->p_dev,
+ p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+ /* Lock is last to initialize, if everything else was */
+ if (i == p_hwfn->p_l2_info->queues)
+ OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
+#endif
+
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = OSAL_NULL;
+}
+
+/* TODO - we'll need locking around these... */
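+/* Reserve a free qid slot inside the CID's queue-zone and record it in
+ * p_cid->qid_usage_idx; fails if the zone index is out of range or all
+ * MAX_QUEUES_PER_QZONE slots are already taken.
+ */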
+static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);
+
+ if (queue_id > p_l2_info->queues) {
+ DP_NOTICE(p_hwfn, true,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ OSAL_MUTEX_RELEASE(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);
+
+ OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
+}
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
+{
+ /* For VF-queues, stuff is a bit complicated as:
+ * - They always maintain the qid_usage on their own.
+ * - In legacy mode, they also maintain their CIDs.
+ */
+
+ /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
+ if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+ if (!p_cid->b_legacy_vf)
+ ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+}
+
+/* The internal variant is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+static struct ecore_queue_cid *
+_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid, u32 cid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
+ p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
+ if (p_cid == OSAL_NULL)
+ return OSAL_NULL;
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->rel = *p_params;
+ p_cid->p_owner = p_hwfn;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params != OSAL_NULL) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->b_legacy_vf = p_vf_params->b_legacy;
+ } else {
+ p_cid->vfid = ECORE_QUEUE_CID_PF;
+ }
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->p_dev)) {
+ p_cid->abs = p_cid->rel;
+
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
+ &p_cid->abs.queue_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
+ rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc != ECORE_SUCCESS)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+ /* SBs relevant information was already provided as absolute */
+ p_cid->abs.sb = p_cid->rel.sb;
+ p_cid->abs.sb_idx = p_cid->rel.sb_idx;
+
+out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->rel.vport_id, p_cid->abs.vport_id,
+ p_cid->rel.queue_id, p_cid->qid_usage_idx,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id, p_cid->abs.stats_id,
+ p_cid->abs.sb, p_cid->abs.sb_idx);
+
+ return p_cid;
+
+fail:
+ OSAL_VFREE(p_hwfn->p_dev, p_cid);
+ return OSAL_NULL;
+}
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params)
+{
+ struct ecore_queue_cid *p_cid;
+ u8 vfid = ECORE_CXT_PF_CID;
+ bool b_legacy_vf = false;
+ u32 cid = 0;
+
+ /* In case of legacy VFs, the CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->b_legacy) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VFs don't need a CID as the queue configuration will be done
+ * by the PF.
+ */
+ if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
+ if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid) != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
+ return OSAL_NULL;
+ }
+ }
+
+ p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, p_vf_params);
+ if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
+ _ecore_cxt_release_cid(p_hwfn, cid, vfid);
+
+ return p_cid;
+}
+
+static struct ecore_queue_cid *
+ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params)
+{
+ return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
+}
+
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params)
@@ -36,9 +336,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
+ u16 rx_mode = 0, tx_err = 0;
u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- u16 rx_mode = 0;
rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != ECORE_SUCCESS)
@@ -71,6 +371,30 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
+ /* Handle requests for strict behavior on transmission errors */
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
+ p_params->b_err_illegal_vlan_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
+ p_params->b_err_small_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
+ p_params->b_err_anti_spoof ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
+ p_params->b_err_illegal_inband_mode ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
+ p_params->b_err_vlan_insert_with_inband ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
+ p_params->b_err_big_pkt ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
+ p_params->b_err_ctrl_frame ?
+ ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+ p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
+
/* TPA related fields */
OSAL_MEMSET(&p_ramrod->tpa_param, 0,
sizeof(struct eth_vport_tpa_param));
@@ -129,10 +453,9 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct ecore_rss_params *p_rss)
{
- enum _ecore_status_t rc = ECORE_SUCCESS;
struct eth_vport_rss_config *p_config;
- u16 abs_l2_queue = 0;
- int i;
+ int i, table_size;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
if (!p_rss) {
p_ramrod->common.update_rss_flg = 0;
@@ -186,16 +509,40 @@ ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
p_config->capabilities,
p_config->update_rss_ind_table, p_config->update_rss_key);
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
- rc = ecore_fw_l2_queue(p_hwfn,
- (u8)p_rss->rss_ind_table[i],
- &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
+ table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return ECORE_INVAL;
+
+ p_config->indirection_table[i] =
+ OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
+ }
- p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue);
- DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n",
- i, p_config->indirection_table[i]);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
+ OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
}
for (i = 0; i < 10; i++)
@@ -250,8 +597,8 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "p_ramrod->rx_mode.state = 0x%x\n",
- state);
+ "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
}
/* Set Tx mode accept flags */
@@ -274,8 +621,8 @@ ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "p_ramrod->tx_mode.state = 0x%x\n",
- state);
+ "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
+ p_ramrod->common.vport_id, state);
}
}
@@ -534,57 +881,28 @@ ecore_filter_accept_cmd(struct ecore_dev *p_dev,
return 0;
}
-static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn,
- struct ecore_hw_cid_data *p_cid_data)
-{
- if (!p_cid_data->b_cid_allocated)
- return;
-
- ecore_cxt_release_cid(p_hwfn, p_cid_data->cid);
- p_cid_data->b_cid_allocated = false;
-}
-
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod)
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 abs_rx_q_id = 0;
- u8 abs_vport_id = 0;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- p_rx_cid->cid = cid;
- p_rx_cid->opaque_fid = opaque_fid;
- p_rx_cid->vport_id = p_params->vport_id;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
+ p_cid->abs.vport_id, p_cid->abs.sb);
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -595,11 +913,11 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod = &p_ent->ramrod.rx_queue_start;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->vport_id = abs_vport_id;
- p_ramrod->stats_counter_id = p_params->stats_id;
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = 0;
p_ramrod->complete_event_flg = 1;
@@ -609,92 +927,88 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- if (p_params->vf_qid || b_use_zone_a_prod) {
- p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
+ if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
- b_use_zone_a_prod ? " [legacy]" : "",
- p_params->vf_qid);
- p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
+ !!p_cid->b_legacy_vf ? " [legacy]" : "",
+ p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
}
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void OSAL_IOMEM * *pp_prod)
{
- struct ecore_hw_cid_data *p_rx_cid;
u32 init_prod_val = 0;
- u16 abs_l2_queue = 0;
- u8 abs_stats_id = 0;
- enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_rxq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr,
- cqe_pbl_size, pp_prod);
- }
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
+ GTT_BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
+ return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+enum _ecore_status_t
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
+
/* Allocate a CID for the queue */
- p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
- &p_rx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_rx_cid->b_cid_allocated = true;
- p_params->stats_id = abs_stats_id;
- p_params->vf_qid = 0;
-
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
- opaque_fid,
- p_rx_cid->cid,
- p_params,
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_NOMEM;
+
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ else
+ rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr,
cqe_pbl_size,
- false);
+ &p_ret_params->p_prod);
+ /* Provide the caller with a reference to the queue handle */
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
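For orientation, here is a minimal caller-side sketch of the reworked Rx flow (illustrative only, not part of this patch): the opaque handle returned in p_ret_params is what later identifies the queue to ecore_eth_rx_queue_stop(). The function name and the literal values below are hypothetical.

static enum _ecore_status_t
example_rxq_open_close(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
		       dma_addr_t bd_chain_phys, dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size)
{
	struct ecore_queue_start_common_params params;
	struct ecore_rxq_start_ret_params ret_params;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&params, 0, sizeof(params));
	params.vport_id = 0;	/* relative vport */
	params.queue_id = 0;	/* relative Rx queue index */
	params.stats_id = 0;	/* relative stats counter (PF only) */
	params.sb = 0;		/* absolute status block id */
	params.sb_idx = 0;	/* protocol index within the SB */

	rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
				      0x600 /* bd_max_bytes */,
				      bd_chain_phys, cqe_pbl_addr,
				      cqe_pbl_size, &ret_params);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* ret_params.p_prod is the Rx producer address; ret_params.p_handle
	 * is the opaque handle used for later stop/update calls.
	 */
	return ecore_eth_rx_queue_stop(p_hwfn, ret_params.p_handle,
				       false, false);
}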
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handles,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -704,14 +1018,14 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_rx_cid;
- u16 qid, abs_rx_q_id = 0;
+ struct ecore_queue_cid *p_cid;
enum _ecore_status_t rc = ECORE_NOTIMPL;
u8 i;
if (IS_VF(p_hwfn->p_dev))
return ecore_vf_pf_rxqs_update(p_hwfn,
- rx_queue_id,
+ (struct ecore_queue_cid **)
+ pp_rxq_handles,
num_rxqs,
complete_cqe_flg,
complete_event_flg);
@@ -721,12 +1035,11 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
- qid = rx_queue_id + i;
- p_rx_cid = &p_hwfn->p_rx_cids[qid];
+ p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];
/* Get SPQ entry */
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
@@ -735,41 +1048,34 @@ ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc)
+ if (rc != ECORE_SUCCESS)
return rc;
}
return rc;
}
-enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only, bool cqe_completion)
+static enum _ecore_status_t
+ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool b_eq_completion_only,
+ bool b_cqe_completion)
{
- struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- u16 abs_rx_q_id = 0;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id,
- cqe_completion);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_rx_cid->cid;
- init_data.opaque_fid = p_rx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -779,64 +1085,56 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_stop;
-
- ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
- ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
- p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
- p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) &&
- !eq_completion_only) || cqe_completion;
- p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid ==
- p_hwfn->hw_info.opaque_fid) ||
- eq_completion_only;
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
+ b_eq_completion_only;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
- ecore_sp_release_queue_cid(p_hwfn, p_rx_cid);
+enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union ecore_qm_pq_params *p_pq_params)
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id)
{
struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- struct ecore_hw_cid_data *p_tx_cid;
- u16 pq_id, abs_tx_q_id = 0;
- u8 abs_vport_id;
enum _ecore_status_t rc = ECORE_NOTIMPL;
- /* Store information for the stop */
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- p_tx_cid->cid = cid;
- p_tx_cid->opaque_fid = opaque_fid;
-
- rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
- rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
- if (rc != ECORE_SUCCESS)
- return rc;
-
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = cid;
- init_data.opaque_fid = opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -846,110 +1144,89 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.tx_queue_start;
- p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
- p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
- p_ramrod->sb_index = (u8)p_params->sb_idx;
- p_ramrod->stats_counter_id = p_params->stats_id;
+ p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
+ p_ramrod->sb_index = p_cid->abs.sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
- p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+ p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
- pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params);
p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);
return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
-enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
+ dma_addr_t pbl_addr, u16 pbl_size,
void OSAL_IOMEM * *pp_doorbell)
{
- struct ecore_hw_cid_data *p_tx_cid;
- union ecore_qm_pq_params pq_params;
- u8 abs_stats_id = 0;
enum _ecore_status_t rc;
- if (IS_VF(p_hwfn->p_dev)) {
- return ecore_vf_pf_txq_start(p_hwfn,
- p_params->queue_id,
- p_params->sb,
- (u8)p_params->sb_idx,
- pbl_addr,
- pbl_size,
- pp_doorbell);
- }
-
- rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
+ /* TODO - set tc in the pq_params for multi-cos */
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
if (rc != ECORE_SUCCESS)
return rc;
- p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
- OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-
- pq_params.eth.tc = tc;
-
- /* Allocate a CID for the queue */
- rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid);
- if (rc != ECORE_SUCCESS) {
- DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
- return rc;
- }
- p_tx_cid->b_cid_allocated = true;
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = (u8 OSAL_IOMEM *)
+ p_hwfn->doorbells +
+ DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
- opaque_fid, p_tx_cid->cid, p_params->queue_id,
- p_params->vport_id, p_params->sb);
+ return ECORE_SUCCESS;
+}
- p_params->stats_id = abs_stats_id;
+enum _ecore_status_t
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params)
+{
+ struct ecore_queue_cid *p_cid;
+ enum _ecore_status_t rc;
- /* TODO - set tc in the pq_params for multi-cos */
- rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
- opaque_fid,
- p_tx_cid->cid,
- p_params,
- pbl_addr,
- pbl_size,
- &pq_params);
+ p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
+ if (p_cid == OSAL_NULL)
+ return ECORE_INVAL;
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
if (rc != ECORE_SUCCESS)
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
return rc;
}
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn)
-{
- return ECORE_NOTIMPL;
-}
-
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id)
+static enum _ecore_status_t
+ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
- struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
-
- if (IS_VF(p_hwfn->p_dev))
- return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+ enum _ecore_status_t rc;
- /* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
- init_data.cid = p_tx_cid->cid;
- init_data.opaque_fid = p_tx_cid->opaque_fid;
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
rc = ecore_sp_init_request(p_hwfn, &p_ent,
@@ -958,11 +1235,22 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (rc != ECORE_SUCCESS)
- return rc;
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_handle)
+{
+ struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
+ enum _ecore_status_t rc;
- ecore_sp_release_queue_cid(p_hwfn, p_tx_cid);
+ if (IS_PF(p_hwfn->p_dev))
+ rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);
+
+ if (rc == ECORE_SUCCESS)
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
return rc;
}
@@ -988,17 +1276,6 @@ ecore_filter_action(enum ecore_filter_opcode opcode)
return action;
}
-static void ecore_set_fw_mac_addr(__le16 *fw_msb,
- __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
-{
- ((u8 *)fw_msb)[0] = mac[1];
- ((u8 *)fw_msb)[1] = mac[0];
- ((u8 *)fw_mid)[0] = mac[3];
- ((u8 *)fw_mid)[1] = mac[2];
- ((u8 *)fw_lsb)[0] = mac[5];
- ((u8 *)fw_lsb)[1] = mac[4];
-}
-
static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
u16 opaque_fid,
@@ -1093,6 +1370,9 @@ ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
case ECORE_FILTER_VNI:
p_first_filter->type = ETH_FILTER_TYPE_VNI;
break;
+ case ECORE_FILTER_UNUSED: /* @DPDK */
+ p_first_filter->type = MAX_ETH_FILTER_TYPE;
+ break;
}
if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
@@ -1738,3 +2018,87 @@ void ecore_reset_vport_stats(struct ecore_dev *p_dev)
else
_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
}
+
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params)
+{
+ if (p_cfg_params->arfs_enable) {
+ ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6);
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable");
+ } else {
+ ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %s\n",
+ p_cfg_params->arfs_enable ? "Enable" : "Disable");
+}
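A hedged usage sketch (not part of the patch): enabling ARFS for TCP over IPv4 only could look as follows, assuming the caller already holds a valid p_ptt.

	struct ecore_arfs_config_params arfs_cfg;

	OSAL_MEMSET(&arfs_cfg, 0, sizeof(arfs_cfg));
	arfs_cfg.arfs_enable = true;
	arfs_cfg.tcp = true;	/* at least one of tcp/udp must be set */
	arfs_cfg.ipv4 = true;	/* at least one of ipv4/ipv6 must be set */
	ecore_arfs_mode_configure(p_hwfn, p_ptt, &arfs_cfg);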
+
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add)
+{
+ struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
+ struct ecore_spq_entry *p_ent = OSAL_NULL;
+ struct ecore_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+ rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Get SPQ entry */
+ OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+ init_data.cid = ecore_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = ECORE_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+ }
+
+ rc = ecore_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
+ p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
+ p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
+ p_ramrod->vport_id = abs_vport_id;
+ p_ramrod->filter_type = RFS_FILTER_TYPE;
+ p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ b_is_add ? "Adding" : "Removing",
+ (unsigned long)p_addr, length);
+
+ return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
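As a rough illustration (not taken from this patch), adding a single ntuple filter with blocking completion could look like this; p_hdr_dma, hdr_len, rx_queue_id and vport_id are hypothetical, and the buffer behind p_hdr_dma must already be DMA-mapped and hold the 4-tuple packet header.

	enum _ecore_status_t rc;

	/* A NULL callback selects the blocking ECORE_SPQ_MODE_EBLOCK path */
	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_ptt,
					       OSAL_NULL /* p_cb */,
					       p_hdr_dma, hdr_len,
					       rx_queue_id, vport_id,
					       true /* b_is_add */);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_hwfn, false, "GFT filter add failed\n");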
diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h
index 9c1bd388..7fe4cbcb 100644
--- a/drivers/net/qede/base/ecore_l2.h
+++ b/drivers/net/qede/base/ecore_l2.h
@@ -15,59 +15,106 @@
#include "ecore_spq.h"
#include "ecore_l2_api.h"
-/**
- * @brief ecore_sp_eth_tx_queue_update -
- *
- * This ramrod updates a TX queue. It is used for setting the active
- * state of the queue.
- *
- * @note Final phase API.
- *
- * @param p_hwfn
- *
- * @return enum _ecore_status_t
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define ECORE_QUEUE_CID_PF (0xff)
+
+/* Additional parameters required for the initialization of the queue_cid
+ * and relevant only for a PF initializing one for its VFs.
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn);
+struct ecore_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ bool b_legacy;
+
+ /* For VFs, this index arrives via TLV to differentiate between
+ * different queues opened on the same qzone, and is passed
+ * [where the PF would have allocated it internally for its own].
+ */
+ u8 qid_usage_idx;
+};
+
+struct ecore_queue_cid {
+ /* 'Relative' is a relative term ;-). Usually the indices [not counting
+ * SBs] would be PF-relative, but there are some cases where that isn't
+ * the case - specifically for a PF configuring its VF indices it's
+ * possible some fields [E.g., stats-id] in 'rel' would already be abs.
+ */
+ struct ecore_queue_start_common_params rel;
+ struct ecore_queue_start_common_params abs;
+ u32 cid;
+ u16 opaque_fid;
+
+ /* VFs queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ u8 vfid;
+ u8 vf_qid;
+
+ /* We need an additional index to differentiate between queues opened
+ * for the same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise the PF has no way to differentiate].
+ */
+ u8 qid_usage_idx;
+
+ /* Legacy VFs might have Rx producer located elsewhere */
+ bool b_legacy_vf;
+
+ struct ecore_hwfn *p_owner;
+};
+
+enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn);
+void ecore_l2_setup(struct ecore_hwfn *p_hwfn);
+void ecore_l2_free(struct ecore_hwfn *p_hwfn);
+
+void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+struct ecore_queue_cid *
+ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ struct ecore_queue_cid_vf_params *p_vf_params);
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
struct ecore_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts an Rx queue, when queue_cid is already prepared
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id, stats_id, sb, sb_idx, vf_qid]
- stats_id is absolute packed in p_params.
+ * @param p_cid
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
- * @param b_use_zone_a_prod - support legacy VF producers
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size, bool b_use_zone_a_prod);
+ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue; Should be used where contexts are handled
- * outside of the ramrod area [specifically iov scenarios]
+ * @brief - Starts a Tx queue, where queue_cid is already prepared
*
* @param p_hwfn
- * @param opaque_fid
- * @param cid
- * @param p_params [queue_id, vport_id,stats_id, sb, sb_idx, vf_qid]
+ * @param p_cid
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
@@ -75,14 +122,38 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct ecore_queue_start_common_params *p_params,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union ecore_qm_pq_params *p_pq_params);
+ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ u16 pq_id);
u8 ecore_mcast_bin_from_mac(u8 *mac);
+/**
+ * @brief - ecore_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove an arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_ptt
+ * @params p_cb Used for ECORE_SPQ_MODE_CB, where the client would
+ initialize it with a cookie and callback function address;
+ if not using this mode, the client must pass NULL.
+ * @params p_addr p_addr is the actual packet header that needs to be
+ * filtered. It has to be mapped for IO read prior to
+ * calling this [contains the 4 tuples - src ip, dest ip,
+ * src port, dest port].
+ * @params length length of the p_addr header, up to and including the
+ * transport header.
+ * @params qid the received packets will be directed to this queue.
+ * @params vport_id
+ * @params b_is_add flag to add or remove the filter.
+ *
+ */
+enum _ecore_status_t
+ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_spq_comp_cb *p_cb,
+ dma_addr_t p_addr, u16 length,
+ u16 qid, u8 vport_id,
+ bool b_is_add);
#endif
diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h
index 326fa45b..d09f3c4a 100644
--- a/drivers/net/qede/base/ecore_l2_api.h
+++ b/drivers/net/qede/base/ecore_l2_api.h
@@ -28,15 +28,26 @@ enum ecore_rss_caps {
#endif
struct ecore_queue_start_common_params {
- /* Rx/Tx queue id */
- u8 queue_id;
+ /* Should always be relative to entity sending this. */
u8 vport_id;
+ u16 queue_id;
- /* stats_id is relative or absolute depends on function */
+ /* Relative, but relevant only for PFs */
u8 stats_id;
+
+ /* These are always absolute */
u16 sb;
- u16 sb_idx;
- u16 vf_qid;
+ u8 sb_idx;
+};
+
+struct ecore_rxq_start_ret_params {
+ void OSAL_IOMEM *p_prod;
+ void *p_handle;
+};
+
+struct ecore_txq_start_ret_params {
+ void OSAL_IOMEM *p_doorbell;
+ void *p_handle;
};
struct ecore_rss_params {
@@ -48,7 +59,9 @@ struct ecore_rss_params {
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
- u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+
+ /* Indirection table consists of rx queue handles */
+ void *rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
u32 rss_key[ECORE_RSS_KEY_SIZE];
};
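Because the indirection table now holds Rx queue handles instead of queue ids, a caller would fill it from the p_handle values returned by ecore_eth_rx_queue_start(). A minimal sketch under that assumption (rxq_handles and num_rxqs are hypothetical):

	struct ecore_rss_params rss;
	int i;

	OSAL_MEMSET(&rss, 0, sizeof(rss));
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		rss.rss_ind_table[i] = rxq_handles[i % num_rxqs];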
@@ -89,6 +102,7 @@ enum ecore_filter_ucast_type {
ECORE_FILTER_INNER_MAC_VNI_PAIR,
ECORE_FILTER_MAC_VNI_PAIR,
ECORE_FILTER_VNI,
+ ECORE_FILTER_UNUSED, /* @DPDK */
};
struct ecore_filter_ucast {
@@ -127,6 +141,14 @@ struct ecore_filter_accept_flags {
#define ECORE_ACCEPT_BCAST 0x20
};
+struct ecore_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ bool arfs_enable; /* Enable or disable arfs mode */
+};
+
/* Add / remove / move / remove-all unicast MAC-VLAN filters.
* FW will assert in the following cases, so driver should take care...:
* 1. Adding a filter to a full table.
@@ -159,42 +181,37 @@ ecore_filter_accept_cmd(
struct ecore_spq_comp_cb *p_comp_data);
/**
- * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod
+ * @brief ecore_eth_rx_queue_start - RX Queue Start Ramrod
*
* This ramrod initializes an RX Queue for a VPort. An Assert is generated if
* the VPort ID is not currently initialized.
*
* @param p_hwfn
* @param opaque_fid
- * @p_params [stats_id is relative, packed in p_params]
+ * @p_params Inputs; Relative for PF [SB being an exception]
* @param bd_max_bytes Maximum bytes that can be placed on a BD
* @param bd_chain_phys_addr Physical address of BDs for receive.
* @param cqe_pbl_addr Physical address of the CQE PBL Table.
* @param cqe_pbl_size Size of the CQE PBL Table
- * @param pp_prod Pointer to place producer's
- * address for the Rx Q (May be
- * NULL).
+ * @param p_ret_params Pointer to a struct to be filled with the outputs.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM * *pp_prod);
+ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_rx_queue_stop -
- *
- * This ramrod closes an RX queue. It sends RX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_rx_queue_stop - This ramrod closes an Rx queue
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
+ * @param p_rxq Handle of the queue to close
* @param eq_completion_only If True completion will be on
* EQe, if False completion will be
* on EQe if p_hwfn opaque
@@ -205,13 +222,13 @@ ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion);
+ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only,
+ bool cqe_completion);
/**
- * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod
+ * @brief - TX Queue Start Ramrod
*
* This ramrod initializes a TX Queue for a VPort. An Assert is generated if
* the VPort is not currently initialized.
@@ -222,34 +239,29 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
* @param tc traffic class to use with this L2 txq
* @param pbl_addr address of the pbl array
* @param pbl_size number of entries in pbl
- * @param pp_doorbell Pointer to place doorbell pointer (May be NULL).
- * This address should be used with the
- * DIRECT_REG_WR macro.
+ * @param p_ret_params Pointer to a struct to be filled with the return parameters.
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
-ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
- u16 opaque_fid,
- struct ecore_queue_start_common_params *p_params,
- u8 tc,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM * *pp_doorbell);
+ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct ecore_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct ecore_txq_start_ret_params *p_ret_params);
/**
- * @brief ecore_sp_eth_tx_queue_stop -
- *
- * This ramrod closes a TX queue. It sends TX queue stop ramrod
- * + CFC delete ramrod
+ * @brief ecore_eth_tx_queue_stop - closes a Tx queue
*
* @param p_hwfn
- * @param tx_queue_id TX Queue ID
+ * @param p_txq - handle of the Tx queue that needs to be closed
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id);
+enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
+ void *p_txq);
enum ecore_tpa_mode {
ECORE_TPA_MODE_NONE,
@@ -273,6 +285,15 @@ struct ecore_sp_vport_start_params {
bool zero_placement_offset;
bool check_mac;
bool check_ethtype;
+
+ /* Strict behavior on transmission errors */
+ bool b_err_illegal_vlan_mode;
+ bool b_err_illegal_inband_mode;
+ bool b_err_vlan_insert_with_inband;
+ bool b_err_small_pkt;
+ bool b_err_big_pkt;
+ bool b_err_anti_spoof;
+ bool b_err_ctrl_frame;
};
/**
@@ -372,19 +393,19 @@ ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
* @note Final phase API.
*
* @param p_hwfn
- * @param rx_queue_id RX Queue ID
- * @param num_rxqs Allow to update multiple rx
- * queues, from rx_queue_id to
- * (rx_queue_id + num_rxqs)
+ * @param pp_rxq_handlers An array of queue handlers to be updated.
+ * @param num_rxqs number of queues to update.
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
+ * @param comp_mode
+ * @param p_comp_data
*
* @return enum _ecore_status_t
*/
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ void **pp_rxq_handlers,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
@@ -401,4 +422,18 @@ void ecore_get_vport_stats(struct ecore_dev *p_dev,
void ecore_reset_vport_stats(struct ecore_dev *p_dev);
+/**
+ *@brief ecore_arfs_mode_configure -
+ *
+ *Enable or disable rfs mode. At least one of tcp or udp must be true,
+ *and at least one of ipv4 or ipv6 must be true, to enable rfs mode.
+ *
+ *@param p_hwfn
+ *@param p_ptt
+ *@param p_cfg_params arfs mode configuration parameters.
+ *
+ */
+void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_arfs_config_params *p_cfg_params);
#endif
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 2ff97155..a834ac74 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -104,7 +104,6 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
}
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
- p_hwfn->mcp_info = OSAL_NULL;
return ECORE_SUCCESS;
}
@@ -365,6 +364,7 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_mcp_mb_params *p_mb_params)
{
+ union drv_union_data union_data;
u32 union_data_addr;
enum _ecore_status_t rc;
@@ -374,6 +374,15 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
return ECORE_BUSY;
}
+ if (p_mb_params->data_src_size > sizeof(union_data) ||
+ p_mb_params->data_dst_size > sizeof(union_data)) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size, p_mb_params->data_dst_size,
+ sizeof(union_data));
+ return ECORE_INVAL;
+ }
+
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
OFFSETOF(struct public_drv_mb, union_data);
@@ -384,19 +393,21 @@ ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- if (p_mb_params->p_data_src != OSAL_NULL)
- ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr,
- p_mb_params->p_data_src,
- sizeof(*p_mb_params->p_data_src));
+ OSAL_MEM_ZERO(&union_data, sizeof(union_data));
+ if (p_mb_params->p_data_src != OSAL_NULL && p_mb_params->data_src_size)
+ OSAL_MEMCPY(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd,
p_mb_params->param, &p_mb_params->mcp_resp,
&p_mb_params->mcp_param);
- if (p_mb_params->p_data_dst != OSAL_NULL)
+ if (p_mb_params->p_data_dst != OSAL_NULL &&
+ p_mb_params->data_dst_size)
ecore_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
- union_data_addr,
- sizeof(*p_mb_params->p_data_dst));
+ union_data_addr, p_mb_params->data_dst_size);
ecore_mcp_mb_unlock(p_hwfn, p_mb_params->cmd);
@@ -444,14 +455,13 @@ enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
u32 i_txn_size, u32 *i_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -471,13 +481,17 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
u32 *o_txn_size, u32 *o_buf)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
mb_params.param = param;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
@@ -486,7 +500,8 @@ enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
*o_mcp_param = mb_params.mcp_param;
*o_txn_size = *o_mcp_param;
- OSAL_MEMCPY(o_buf, (u32 *)&union_data.raw_data, *o_txn_size);
+ /* @DPDK */
+ OSAL_MEMCPY(o_buf, raw_data, RTE_MIN(*o_txn_size, MCP_DRV_NVM_BUF_LEN));
return ECORE_SUCCESS;
}
@@ -519,57 +534,389 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
}
#endif
+static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+{
+ return (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+}
+
+static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc != ECORE_SUCCESS)
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
+#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
+#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
+#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
+#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
+#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
+#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
+
+static u32 ecore_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+#ifdef CONFIG_ECORE_L2
+ config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_SRIOV
+ config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ROCE
+ config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_IWARP
+ config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+ config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+ config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_LL2
+ config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
+#endif
+
+ return config_bitmap;
+}
+
+struct ecore_load_req_in_params {
+ u8 hsi_ver;
+#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
+#define ECORE_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct ecore_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static enum _ecore_status_t
+__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_load_req_in_params *p_in_params,
+ struct ecore_load_req_out_params *p_out_params)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&load_req, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE,
+ p_in_params->drv_role);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ ECORE_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0, load_req.drv_ver_1,
+ load_req.fw_ver, load_req.misc0,
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ ECORE_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ ECORE_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_FLAGS0));
+
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to send load request, rc = %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0, load_rsp.drv_ver_1,
+ load_rsp.fw_ver, load_rsp.misc0,
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ ECORE_MFW_GET_FIELD(load_rsp.misc0,
+ LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ ECORE_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
+ enum ecore_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case ECORE_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case ECORE_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum ecore_load_req_force {
+ ECORE_LOAD_REQ_FORCE_NONE,
+ ECORE_LOAD_REQ_FORCE_PF,
+ ECORE_LOAD_REQ_FORCE_ALL,
+};
+
+static enum _ecore_status_t
+ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
+ enum ecore_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case ECORE_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case ECORE_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case ECORE_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_load_code)
+ struct ecore_load_req_params *p_params)
{
- struct ecore_dev *p_dev = p_hwfn->p_dev;
- struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct ecore_load_req_out_params out_params;
+ struct ecore_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
enum _ecore_status_t rc;
#ifndef ASIC_ONLY
if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
- ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+ ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
return ECORE_SUCCESS;
}
#endif
- OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
- mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
- mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
- p_dev->drv_type;
- OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);
- mb_params.p_data_src = &union_data;
- rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_0 = ECORE_VERSION;
+ in_params.drv_ver_1 = ecore_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+ rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc != ECORE_SUCCESS)
+ return rc;
- /* if mcp fails to respond we must abort */
- if (rc != ECORE_SUCCESS) {
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
return rc;
- }
- *p_load_code = mb_params.mcp_resp;
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
+ */
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
+
+ /* The previous load request set the mailbox blocking */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ /* The previous load request set the mailbox blocking */
+ p_hwfn->mcp_info->block_mb_sending = false;
+
+ if (ecore_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ rc = ecore_get_mfw_force_cmd(p_hwfn,
+ ECORE_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ in_params.force_cmd = mfw_force_cmd;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn, false,
+ "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return ECORE_BUSY;
+ }
+ }
- /* If MFW refused (e.g. other port is in diagnostic mode) we
- * must abort. This can happen in the following cases:
- * - Other port is in diagnostic mode
- * - Previously loaded function on the engine is not compliant with
- * the requester.
- * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
- * -
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
*/
- if (!(*p_load_code) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
- ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
- DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ * This is unexpected since a quasi-FLR request was
+ * previously sent as part of ecore_hw_prepare().
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
+ return ECORE_INVAL;
+ }
+ break;
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
+ case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
+ DP_NOTICE(p_hwfn, false,
+ "MFW refused a load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
return ECORE_BUSY;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ break;
}
+ p_params->load_code = out_params.load_code;
+
return ECORE_SUCCESS;
}
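A caller-side sketch of the new load-request flow (hedged; the field values below are assumptions, not mandated by this patch): the driver fills an ecore_load_req_params struct and inspects load_code on return.

	struct ecore_load_req_params load_req_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
	load_req_params.drv_role = ECORE_DRV_ROLE_OS;
	load_req_params.timeout_val = 0;	/* assumed: 0 = MFW default */
	load_req_params.avoid_eng_reset = false;

	rc = ecore_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
	if (rc == ECORE_SUCCESS &&
	    load_req_params.load_code == FW_MSG_CODE_DRV_LOAD_ENGINE) {
		/* first function to load on this engine */
	}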
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ u32 wol_param, mcp_resp, mcp_param;
+
+ /* @DPDK */
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
+ &mcp_resp, &mcp_param);
+}
+
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
+{
+ struct ecore_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
@@ -611,7 +958,6 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
enum _ecore_status_t rc;
int i;
@@ -622,8 +968,8 @@ enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
- OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
&mb_params);
if (rc != ECORE_SUCCESS) {
@@ -801,9 +1147,6 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
- if (p_link->link_up)
- ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);
-
OSAL_LINK_UPDATE(p_hwfn);
}
@@ -812,8 +1155,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
{
struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
- struct eth_phy_cfg *p_phy_cfg;
+ struct eth_phy_cfg phy_cfg;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cmd;
@@ -823,32 +1165,30 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
#endif
/* Set the shmem configuration according to params */
- p_phy_cfg = &union_data.drv_phy_cfg;
- OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
+ OSAL_MEM_ZERO(&phy_cfg, sizeof(phy_cfg));
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
- p_phy_cfg->speed = params->speed.forced_speed;
- p_phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
- p_phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
- p_phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
- p_phy_cfg->adv_speed = params->speed.advertised_speeds;
- p_phy_cfg->loopback_mode = params->loopback_mode;
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
p_hwfn->b_drv_link_init = b_up;
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
"Configuring Link: Speed 0x%08x, Pause 0x%08x,"
- " adv_speed 0x%08x, loopback 0x%08x,"
- " features 0x%08x\n",
- p_phy_cfg->speed, p_phy_cfg->pause,
- p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
- p_phy_cfg->feature_config_flags);
+ " adv_speed 0x%08x, loopback 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode);
else
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = cmd;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
/* if mcp fails to respond we must abort */
@@ -927,8 +1267,8 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
enum ecore_mcp_protocol_type stats_type;
union ecore_mcp_protocol_stats stats;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 hsi_param;
+ enum _ecore_status_t rc;
switch (type) {
case MFW_DRV_MSG_GET_LAN_STATS:
@@ -936,7 +1276,7 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
break;
default:
- DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
+ DP_INFO(p_hwfn, "Invalid protocol type %d\n", type);
return;
}
@@ -945,14 +1285,15 @@ static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_STATS;
mb_params.param = hsi_param;
- OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
- mb_params.p_data_src = &union_data;
- ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
+ rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc != ECORE_SUCCESS)
+ DP_ERR(p_hwfn, "Failed to send protocol stats, rc = %d\n", rc);
}
-static void
-ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
- struct public_func *p_shmem_info)
+static void ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
{
struct ecore_mcp_function_info *p_info;
@@ -1043,28 +1384,38 @@ static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
}
+struct ecore_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
static enum _ecore_status_t
ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
- u32 mdump_cmd, union drv_union_data *p_data_src,
- union drv_union_data *p_data_dst, u32 *p_mcp_resp)
+ struct ecore_mdump_cmd_params *p_mdump_cmd_params)
{
struct ecore_mcp_mb_params mb_params;
enum _ecore_status_t rc;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
- mb_params.param = mdump_cmd;
- mb_params.p_data_src = p_data_src;
- mb_params.p_data_dst = p_data_dst;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- if (*p_mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
DP_NOTICE(p_hwfn, false,
"MFW claims that the mdump command is illegal [mdump_cmd 0x%x]\n",
- mdump_cmd);
+ p_mdump_cmd_params->cmd);
rc = ECORE_INVAL;
}
@@ -1074,99 +1425,123 @@ ecore_mcp_mdump_cmd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
static enum _ecore_status_t ecore_mcp_mdump_ack(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_ACK,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_set_values(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u32 epoch)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- OSAL_MEMCPY(&union_data.raw_data, &epoch, sizeof(epoch));
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_SET_VALUES;
+ mdump_cmd_params.p_data_src = &epoch;
+ mdump_cmd_params.data_src_size = sizeof(epoch);
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_SET_VALUES,
- &union_data, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_TRIGGER,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
-}
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_TRIGGER;
-enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
-{
- u32 mcp_resp;
-
- return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_CLEAR_LOGS,
- OSAL_NULL, OSAL_NULL, &mcp_resp);
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static enum _ecore_status_t
ecore_mcp_mdump_get_config(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct mdump_config_stc *p_mdump_config)
{
- union drv_union_data union_data;
- u32 mcp_resp;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MDUMP_GET_CONFIG,
- OSAL_NULL, &union_data, &mcp_resp);
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_CONFIG;
+ mdump_cmd_params.p_data_dst = p_mdump_config;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_config);
+
+ rc = ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
if (rc != ECORE_SUCCESS)
return rc;
- /* A zero response implies that the mdump command is not supported */
- if (!mcp_resp)
+ if (mdump_cmd_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
return ECORE_NOTIMPL;
+ }
- if (mcp_resp != FW_MSG_CODE_OK) {
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
DP_NOTICE(p_hwfn, false,
"Failed to get the mdump configuration and logs info [mcp_resp 0x%x]\n",
- mcp_resp);
+ mdump_cmd_params.mcp_resp);
rc = ECORE_UNKNOWN_ERROR;
}
- OSAL_MEMCPY(p_mdump_config, &union_data.mdump_config,
- sizeof(*p_mdump_config));
-
return rc;
}
-enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt)
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info)
{
+ u32 addr, global_offsize, global_addr;
struct mdump_config_stc mdump_config;
enum _ecore_status_t rc;
- rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
- if (rc != ECORE_SUCCESS)
- return rc;
+ OSAL_MEMSET(p_mdump_info, 0, sizeof(*p_mdump_info));
- DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "MFW mdump_config: version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
- mdump_config.version, mdump_config.config, mdump_config.epoc,
- mdump_config.num_of_logs, mdump_config.valid_logs);
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ p_mdump_info->reason = ecore_rd(p_hwfn, p_ptt,
+ global_addr +
+ OFFSETOF(struct public_global,
+ mdump_reason));
- if (mdump_config.valid_logs > 0) {
- DP_NOTICE(p_hwfn, false,
- "* * * IMPORTANT - HW ERROR register dump captured by device * * *\n");
+ if (p_mdump_info->reason) {
+ rc = ecore_mcp_mdump_get_config(p_hwfn, p_ptt, &mdump_config);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ p_mdump_info->version = mdump_config.version;
+ p_mdump_info->config = mdump_config.config;
+ p_mdump_info->epoch = mdump_config.epoc;
+ p_mdump_info->num_of_logs = mdump_config.num_of_logs;
+ p_mdump_info->valid_logs = mdump_config.valid_logs;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d, version 0x%x, config 0x%x, epoch 0x%x, num_of_logs 0x%x, valid_logs 0x%x\n",
+ p_mdump_info->reason, p_mdump_info->version,
+ p_mdump_info->config, p_mdump_info->epoch,
+ p_mdump_info->num_of_logs, p_mdump_info->valid_logs);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "MFW mdump info: reason %d\n", p_mdump_info->reason);
}
- return rc;
+ return ECORE_SUCCESS;
}
-void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable)
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt)
{
- p_dev->mdump_en = mdump_enable;
+ struct ecore_mdump_cmd_params mdump_cmd_params;
+
+ OSAL_MEM_ZERO(&mdump_cmd_params, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_CLEAR_LOGS;
+
+ return ecore_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
@@ -1184,6 +1559,7 @@ static void ecore_mcp_handle_critical_error(struct ecore_hwfn *p_hwfn,
if (p_hwfn->p_dev->mdump_en) {
DP_NOTICE(p_hwfn, false,
"Not acknowledging the notification to allow the MFW crash dump\n");
+ p_hwfn->p_dev->mdump_en = false;
return;
}
@@ -1256,9 +1632,7 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
ecore_mcp_handle_critical_error(p_hwfn, p_ptt);
break;
default:
- /* @DPDK */
- DP_NOTICE(p_hwfn, false,
- "Unimplemented MFW message %d\n", i);
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = ECORE_INVAL;
}
}
@@ -1364,16 +1738,47 @@ enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
return ECORE_SUCCESS;
}
+/* @DPDK */
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+ecore_mcp_get_shmem_proto_legacy(struct ecore_hwfn *p_hwfn,
+ enum ecore_pci_personality *p_proto)
+{
+ *p_proto = ECORE_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32)*p_proto);
+}
+
+/* @DPDK */
+static enum _ecore_status_t
+ecore_mcp_get_shmem_proto_mfw(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+
+ /* Only the L2 (Ethernet) personality is supported by the DPDK PMD */
+ *p_proto = ECORE_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32)*p_proto, resp, param);
+ return ECORE_SUCCESS;
+}
+
static enum _ecore_status_t
ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
struct public_func *p_info,
+ struct ecore_ptt *p_ptt,
enum ecore_pci_personality *p_proto)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- *p_proto = ECORE_PCI_ETH;
+ if (ecore_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto) !=
+ ECORE_SUCCESS)
+ ecore_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
break;
default:
rc = ECORE_INVAL;
@@ -1394,7 +1799,8 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
info->pause_on_host = (shmem_info.config &
FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
- if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
DP_ERR(p_hwfn, "Unknown personality %08x\n",
(u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
return ECORE_INVAL;
@@ -1422,6 +1828,13 @@ enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ /* Fall back to a standard Ethernet MTU if shmem reports none */
+ if (info->mtu == 0)
+ info->mtu = 1500;
+
DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
"Read configuration from shmem: pause_on_host %02x"
" protocol %02x BW [%02x - %02x]"
@@ -1543,8 +1956,9 @@ int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
continue;
- if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
- &protocol) != ECORE_SUCCESS)
+ if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &protocol) !=
+ ECORE_SUCCESS)
continue;
if ((1 << ((u32)protocol)) & personalities)
@@ -1636,9 +2050,8 @@ enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
struct ecore_mcp_drv_version *p_ver)
{
- struct drv_version_stc *p_drv_version;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
+ struct drv_version_stc drv_version;
u32 num_words, i;
void *p_name;
OSAL_BE32 val;
@@ -1649,18 +2062,20 @@ ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
#endif
- p_drv_version = &union_data.drv_version;
- p_drv_version->version = p_ver->version;
+ OSAL_MEM_ZERO(&drv_version, sizeof(drv_version));
+ drv_version.version = p_ver->version;
num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
for (i = 0; i < num_words; i++) {
+ /* The driver name is expected to be in a big-endian format */
p_name = &p_ver->name[i * sizeof(u32)];
val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(u32 *)&drv_version.name[i * sizeof(u32)] = val;
}
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
- mb_params.p_data_src = &union_data;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
@@ -1700,22 +2115,24 @@ enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- enum ecore_ov_config_method config,
enum ecore_ov_client client)
{
enum _ecore_status_t rc;
u32 resp = 0, param = 0;
u32 drv_mb_param;
- switch (config) {
+ switch (client) {
case ECORE_OV_CLIENT_DRV:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
break;
case ECORE_OV_CLIENT_USER:
drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
break;
+ case ECORE_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
default:
- DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
+ DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", client);
return ECORE_INVAL;
}
@@ -1752,9 +2169,9 @@ ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
}
rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
- drv_state, &resp, &param);
+ drv_mb_param, &resp, &param);
if (rc != ECORE_SUCCESS)
- DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
return rc;
}
@@ -2251,7 +2668,7 @@ enum _ecore_status_t ecore_mcp_bist_register_test(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_bist_clock_test(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt)
{
- u32 drv_mb_param = 0, rsp, param;
+ u32 drv_mb_param, rsp, param;
enum _ecore_status_t rc = ECORE_SUCCESS;
drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
@@ -2327,28 +2744,25 @@ ecore_mcp_get_temperature_info(struct ecore_hwfn *p_hwfn,
struct ecore_temperature_info *p_temp_info)
{
struct ecore_temperature_sensor *p_temp_sensor;
- struct temperature_status_stc *p_mfw_temp_info;
+ struct temperature_status_stc mfw_temp_info;
struct ecore_mcp_mb_params mb_params;
- union drv_union_data union_data;
u32 val;
enum _ecore_status_t rc;
u8 i;
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_GET_TEMPERATURE;
- mb_params.p_data_dst = &union_data;
+ mb_params.p_data_dst = &mfw_temp_info;
+ mb_params.data_dst_size = sizeof(mfw_temp_info);
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- p_mfw_temp_info = &union_data.temp_info;
-
OSAL_BUILD_BUG_ON(ECORE_MAX_NUM_OF_SENSORS != MAX_NUM_OF_SENSORS);
- p_temp_info->num_sensors = OSAL_MIN_T(u32,
- p_mfw_temp_info->num_of_sensors,
+ p_temp_info->num_sensors = OSAL_MIN_T(u32, mfw_temp_info.num_of_sensors,
ECORE_MAX_NUM_OF_SENSORS);
for (i = 0; i < p_temp_info->num_sensors; i++) {
- val = p_mfw_temp_info->sensor[i];
+ val = mfw_temp_info.sensor[i];
p_temp_sensor = &p_temp_info->sensors[i];
p_temp_sensor->sensor_location = (val & SENSOR_LOCATION_MASK) >>
SENSOR_LOCATION_SHIFT;
@@ -2403,7 +2817,60 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
0, &rsp, (u32 *)num_events);
}
-#define ECORE_RESC_ALLOC_VERSION_MAJOR 1
+static enum resource_id_enum
+ecore_mcp_get_mfw_res_id(enum ecore_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case ECORE_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case ECORE_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case ECORE_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case ECORE_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case ECORE_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case ECORE_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case ECORE_MAC:
+ case ECORE_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case ECORE_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case ECORE_LL2_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case ECORE_RDMA_CNQ_RAM:
+ case ECORE_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case ECORE_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case ECORE_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define ECORE_RESC_ALLOC_VERSION_MAJOR 2
#define ECORE_RESC_ALLOC_VERSION_MINOR 0
#define ECORE_RESC_ALLOC_VERSION \
((ECORE_RESC_ALLOC_VERSION_MAJOR << \
@@ -2411,34 +2878,146 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
(ECORE_RESC_ALLOC_VERSION_MINOR << \
DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
-enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param)
+struct ecore_resc_alloc_in_params {
+ u32 cmd;
+ enum ecore_resources res_id;
+ u32 resc_max_val;
+};
+
+struct ecore_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static enum _ecore_status_t
+ecore_mcp_resc_allocation_msg(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_resc_alloc_in_params *p_in_params,
+ struct ecore_resc_alloc_out_params *p_out_params)
{
struct ecore_mcp_mb_params mb_params;
- union drv_union_data *p_union_data;
+ struct resource_info mfw_resc_info;
enum _ecore_status_t rc;
+ OSAL_MEM_ZERO(&mfw_resc_info, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = ecore_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id));
+ return ECORE_INVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ /* Fallthrough */
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return ECORE_INVAL;
+ }
+
OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
- mb_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ mb_params.cmd = p_in_params->cmd;
mb_params.param = ECORE_RESC_ALLOC_VERSION;
- p_union_data = (union drv_union_data *)p_resc_info;
- mb_params.p_data_src = p_union_data;
- mb_params.p_data_dst = p_union_data;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd, p_in_params->res_id,
+ ecore_hw_get_resc_name(p_in_params->res_id),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
+
rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc != ECORE_SUCCESS)
return rc;
- *p_mcp_resp = mb_params.mcp_resp;
- *p_mcp_param = mb_params.mcp_param;
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
- "MFW resource_info: version 0x%x, res_id 0x%x, size 0x%x,"
- " offset 0x%x, vf_size 0x%x, vf_offset 0x%x, flags 0x%x\n",
- *p_mcp_param, p_resc_info->res_id, p_resc_info->size,
- p_resc_info->offset, p_resc_info->vf_size,
- p_resc_info->vf_offset, p_resc_info->flags);
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ ECORE_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num, p_out_params->resc_start,
+ p_out_params->vf_resc_num, p_out_params->vf_resc_start,
+ p_out_params->flags);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct ecore_resc_alloc_out_params out_params;
+ struct ecore_resc_alloc_in_params in_params;
+ enum _ecore_status_t rc;
+
+ OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+ rc = ecore_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
return ECORE_SUCCESS;
}
@@ -2448,6 +3027,182 @@ enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
{
u32 mcp_resp, mcp_param;
- return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR,
- 0, &mcp_resp, &mcp_param);
+ return ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static enum _ecore_status_t ecore_mcp_resource_cmd(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp,
+ u32 *p_mcp_param)
+{
+ enum _ecore_status_t rc;
+
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
+ p_mcp_resp, p_mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return ECORE_NOTIMPL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = ECORE_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn, false,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return rc;
+}
+
+enum _ecore_status_t
+__ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ switch (p_params->timeout) {
+ case ECORE_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case ECORE_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = ECORE_MFW_GET_FIELD(mcp_param,
+ RESOURCE_CMD_RSP_OWNER);
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ enum _ecore_status_t rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ OSAL_MSLEEP(retry_interval_in_ms);
+ } else {
+ OSAL_UDELAY(p_params->retry_interval);
+ }
+ }
+
+ rc = __ecore_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ enum _ecore_status_t rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ ECORE_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = ecore_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp,
+ &mcp_param);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ /* Analyze the response */
+ opcode = ECORE_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ /* Fallthrough */
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, false,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
}
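
For reference only (not part of the patch), below is a minimal sketch of how a caller such as the resource-discovery code in ecore_dev.c might consume the reworked per-resource query implemented above; the helper name example_query_sb_range() is hypothetical:

static enum _ecore_status_t
example_query_sb_range(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 mcp_resp = 0, resc_num = 0, resc_start = 0;
	enum _ecore_status_t rc;

	rc = ecore_mcp_get_resc_info(p_hwfn, p_ptt, ECORE_SB, &mcp_resp,
				     &resc_num, &resc_start);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* An old MFW answers with something other than RESOURCE_ALLOC_OK;
	 * the caller would then fall back to its compile-time defaults.
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		return ECORE_NOTIMPL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "SB range: num 0x%x, start 0x%x\n",
		   resc_num, resc_start);

	return ECORE_SUCCESS;
}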
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 831890ca..37d1835f 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -11,6 +11,7 @@
#include "bcm_osal.h"
#include "mcp_public.h"
+#include "ecore.h"
#include "ecore_mcp_api.h"
/* Using hwfn number (and not pf_num) is required since in CMT mode,
@@ -64,12 +65,22 @@ struct ecore_mcp_info {
struct ecore_mcp_mb_params {
u32 cmd;
u32 param;
- union drv_union_data *p_data_src;
- union drv_union_data *p_data_dst;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
u32 mcp_resp;
u32 mcp_param;
};
+struct ecore_drv_tlv_hdr {
+ u8 tlv_type; /* According to the enum below */
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define ECORE_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
/**
* @brief Initialize the interface with the MCP
*
@@ -128,32 +139,58 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
* @param p_hwfn - hw function
* @param p_ptt - PTT required for register access
* @return enum _ecore_status_t - ECORE_SUCCESS - operation
- * was successul.
+ * was successful.
*/
enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+enum ecore_drv_role {
+ ECORE_DRV_ROLE_OS,
+ ECORE_DRV_ROLE_KDUMP,
+};
+
+struct ecore_load_req_params {
+ enum ecore_drv_role drv_role;
+ u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
+ bool avoid_eng_reset;
+ u32 load_code;
+};
+
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- * succeed, returns whether this PF is the first on the
- * chip/engine/port or function. This function should be
- * called when driver is ready to accept MFW events after
- * Storms initializations are done.
- *
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- * of the following:
- * FW_MSG_CODE_DRV_LOAD_ENGINE
- * FW_MSG_CODE_DRV_LOAD_PORT
- * FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return enum _ecore_status_t -
- * ECORE_SUCCESS - Operation was successul.
- * ECORE_BUSY - Operation failed
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ * returns whether this PF is the first on the engine/port or function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
*/
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- u32 *p_load_code);
+ struct ecore_load_req_params *p_params);
+
+/**
+ * @brief Sends a UNLOAD_REQ message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a UNLOAD_DONE message to the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_unload_done(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
/**
* @brief Read the MFW mailbox into Current buffer.
@@ -327,31 +364,37 @@ enum _ecore_status_t ecore_mcp_mdump_trigger(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
/**
- * @brief - Clears the MFW crash dump logs.
+ * @brief - Sets the MFW's max value for the given resource
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param resc_max_val
+ * @param p_mcp_resp
*
- * @param return ECORE_SUCCESS upon success.
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
+enum _ecore_status_t
+ecore_mcp_set_resc_max_val(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 resc_max_val,
+ u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW crash dump configuration and logs info.
+ * @brief - Gets the MFW allocation info for the given resource
*
- * @param p_hwfn
- * @param p_ptt
+ * @param p_hwfn
+ * @param p_ptt
+ * @param res_id
+ * @param p_mcp_resp
+ * @param p_resc_num
+ * @param p_resc_start
*
- * @param return ECORE_SUCCESS upon success.
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
*/
-enum _ecore_status_t ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt);
-
-enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- struct resource_info *p_resc_info,
- u32 *p_mcp_resp, u32 *p_mcp_param);
+enum _ecore_status_t
+ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ enum ecore_resources res_id, u32 *p_mcp_resp,
+ u32 *p_resc_num, u32 *p_resc_start);
/**
* @brief - Initiates PF FLR
@@ -364,4 +407,79 @@ enum _ecore_status_t ecore_mcp_get_resc_info(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t ecore_mcp_initiate_pf_flr(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt);
+#define ECORE_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP /* 0 */
+#define ECORE_MCP_RESC_LOCK_MAX_VAL 31
+
+enum ecore_resc_lock {
+ ECORE_RESC_LOCK_DBG_DUMP = ECORE_MCP_RESC_LOCK_MIN_VAL,
+ /* Locks that the MFW is aware of should be added here downwards */
+
+ /* Ecore only locks should be added here upwards */
+ ECORE_RESC_LOCK_RESC_ALLOC = ECORE_MCP_RESC_LOCK_MAX_VAL
+};
+
+struct ecore_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define ECORE_MCP_RESC_LOCK_TO_DEFAULT 0
+#define ECORE_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW, 17 = diag over serial]
+ */
+ u8 owner;
+};
+
+/**
+ * @brief Acquires MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_lock_params *p_params);
+
+struct ecore_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow releasing a resource even if it belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * @brief Releases MFW generic resource lock
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_resc_unlock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_resc_unlock_params *p_params);
+
#endif /* __ECORE_MCP_H__ */
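
As an aside (not part of the patch), a minimal sketch of how the generic MFW resource lock declared above could be taken and released around a cross-PF critical section; the wrapper name example_with_resc_alloc_lock() and the retry values are assumptions:

static enum _ecore_status_t
example_with_resc_alloc_lock(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	struct ecore_resc_unlock_params unlock_params;
	struct ecore_resc_lock_params lock_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&lock_params, sizeof(lock_params));
	lock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;
	lock_params.timeout = ECORE_MCP_RESC_LOCK_TO_DEFAULT;
	lock_params.retry_num = 10;		/* assumed retry policy */
	lock_params.retry_interval = 10000;	/* usec between retries */
	lock_params.sleep_b4_retry = true;

	rc = ecore_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
	if (rc != ECORE_SUCCESS)
		return rc;
	if (!lock_params.b_granted)
		return ECORE_BUSY;

	/* ... section that must be serialized across PFs ... */

	OSAL_MEM_ZERO(&unlock_params, sizeof(unlock_params));
	unlock_params.resource = ECORE_RESC_LOCK_RESC_ALLOC;

	return ecore_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
}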
diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h
index c26b4943..190c1352 100644
--- a/drivers/net/qede/base/ecore_mcp_api.h
+++ b/drivers/net/qede/base/ecore_mcp_api.h
@@ -84,6 +84,8 @@ struct ecore_mcp_function_info {
#define ECORE_MCP_VLAN_UNSET (0xffff)
u16 ovlan;
+
+ u16 mtu;
};
struct ecore_mcp_nvm_common {
@@ -173,15 +175,10 @@ union ecore_mcp_protocol_stats {
};
#endif
-enum ecore_ov_config_method {
- ECORE_OV_CONFIG_MTU,
- ECORE_OV_CONFIG_MAC,
- ECORE_OV_CONFIG_WOL
-};
-
enum ecore_ov_client {
ECORE_OV_CLIENT_DRV,
- ECORE_OV_CLIENT_USER
+ ECORE_OV_CLIENT_USER,
+ ECORE_OV_CLIENT_VENDOR_SPEC
};
enum ecore_ov_driver_state {
@@ -235,6 +232,297 @@ struct ecore_mba_vers {
u32 mba_vers[ECORE_MAX_NUM_OF_ROMIMG];
};
+enum ecore_mfw_tlv_type {
+ ECORE_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ ECORE_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ ECORE_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ ECORE_MFW_TLV_ISCSI = 0x8, /* iSCSI protocol TLVs */
+ ECORE_MFW_TLV_MAX = 0x16,
+};
+
+struct ecore_mfw_tlv_generic {
+ u16 feat_flags;
+ bool feat_flags_set;
+ u64 local_mac;
+ bool local_mac_set;
+ u64 additional_mac1;
+ bool additional_mac1_set;
+ u64 additional_mac2;
+ bool additional_mac2_set;
+ u8 drv_state;
+ bool drv_state_set;
+ u8 pxe_progress;
+ bool pxe_progress_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+struct ecore_mfw_tlv_eth {
+ u16 lso_maxoff_size;
+ bool lso_maxoff_size_set;
+ u16 lso_minseg_size;
+ bool lso_minseg_size_set;
+ u8 prom_mode;
+ bool prom_mode_set;
+ u16 tx_descr_size;
+ bool tx_descr_size_set;
+ u16 rx_descr_size;
+ bool rx_descr_size_set;
+ u16 netq_count;
+ bool netq_count_set;
+ u32 tcp4_offloads;
+ bool tcp4_offloads_set;
+ u32 tcp6_offloads;
+ bool tcp6_offloads_set;
+ u16 tx_descr_qdepth;
+ bool tx_descr_qdepth_set;
+ u16 rx_descr_qdepth;
+ bool rx_descr_qdepth_set;
+ u8 iov_offload;
+ bool iov_offload_set;
+ u8 txqs_empty;
+ bool txqs_empty_set;
+ u8 rxqs_empty;
+ bool rxqs_empty_set;
+ u8 num_txqs_full;
+ bool num_txqs_full_set;
+ u8 num_rxqs_full;
+ bool num_rxqs_full_set;
+};
+
+struct ecore_mfw_tlv_fcoe {
+ u8 scsi_timeout;
+ bool scsi_timeout_set;
+ u32 rt_tov;
+ bool rt_tov_set;
+ u32 ra_tov;
+ bool ra_tov_set;
+ u32 ed_tov;
+ bool ed_tov_set;
+ u32 cr_tov;
+ bool cr_tov_set;
+ u8 boot_type;
+ bool boot_type_set;
+ u8 npiv_state;
+ bool npiv_state_set;
+ u32 num_npiv_ids;
+ bool num_npiv_ids_set;
+ u8 switch_name[8];
+ bool switch_name_set;
+ u16 switch_portnum;
+ bool switch_portnum_set;
+ u8 switch_portid[3];
+ bool switch_portid_set;
+ u8 vendor_name[8];
+ bool vendor_name_set;
+ u8 switch_model[8];
+ bool switch_model_set;
+ u8 switch_fw_version[8];
+ bool switch_fw_version_set;
+ u8 qos_pri;
+ bool qos_pri_set;
+ u8 port_alias[3];
+ bool port_alias_set;
+ u8 port_state;
+ bool port_state_set;
+ u16 fip_tx_descr_size;
+ bool fip_tx_descr_size_set;
+ u16 fip_rx_descr_size;
+ bool fip_rx_descr_size_set;
+ u16 link_failures;
+ bool link_failures_set;
+ u8 fcoe_boot_progress;
+ bool fcoe_boot_progress_set;
+ u64 rx_bcast;
+ bool rx_bcast_set;
+ u64 tx_bcast;
+ bool tx_bcast_set;
+ u16 fcoe_txq_depth;
+ bool fcoe_txq_depth_set;
+ u16 fcoe_rxq_depth;
+ bool fcoe_rxq_depth_set;
+ u64 fcoe_rx_frames;
+ bool fcoe_rx_frames_set;
+ u64 fcoe_rx_bytes;
+ bool fcoe_rx_bytes_set;
+ u64 fcoe_tx_frames;
+ bool fcoe_tx_frames_set;
+ u64 fcoe_tx_bytes;
+ bool fcoe_tx_bytes_set;
+ u16 crc_count;
+ bool crc_count_set;
+ u32 crc_err_src_fcid[5];
+ bool crc_err_src_fcid_set[5];
+ u8 crc_err_tstamp[5][14];
+ bool crc_err_tstamp_set[5];
+ u16 losync_err;
+ bool losync_err_set;
+ u16 losig_err;
+ bool losig_err_set;
+ u16 primtive_err;
+ bool primtive_err_set;
+ u16 disparity_err;
+ bool disparity_err_set;
+ u16 code_violation_err;
+ bool code_violation_err_set;
+ u32 flogi_param[4];
+ bool flogi_param_set[4];
+ u8 flogi_tstamp[14];
+ bool flogi_tstamp_set;
+ u32 flogi_acc_param[4];
+ bool flogi_acc_param_set[4];
+ u8 flogi_acc_tstamp[14];
+ bool flogi_acc_tstamp_set;
+ u32 flogi_rjt;
+ bool flogi_rjt_set;
+ u8 flogi_rjt_tstamp[14];
+ bool flogi_rjt_tstamp_set;
+ u32 fdiscs;
+ bool fdiscs_set;
+ u8 fdisc_acc;
+ bool fdisc_acc_set;
+ u8 fdisc_rjt;
+ bool fdisc_rjt_set;
+ u8 plogi;
+ bool plogi_set;
+ u8 plogi_acc;
+ bool plogi_acc_set;
+ u8 plogi_rjt;
+ bool plogi_rjt_set;
+ u32 plogi_dst_fcid[5];
+ bool plogi_dst_fcid_set[5];
+ u8 plogi_tstamp[5][14];
+ bool plogi_tstamp_set[5];
+ u32 plogi_acc_src_fcid[5];
+ bool plogi_acc_src_fcid_set[5];
+ u8 plogi_acc_tstamp[5][14];
+ bool plogi_acc_tstamp_set[5];
+ u8 tx_plogos;
+ bool tx_plogos_set;
+ u8 plogo_acc;
+ bool plogo_acc_set;
+ u8 plogo_rjt;
+ bool plogo_rjt_set;
+ u32 plogo_src_fcid[5];
+ bool plogo_src_fcid_set[5];
+ u8 plogo_tstamp[5][14];
+ bool plogo_tstamp_set[5];
+ u8 rx_logos;
+ bool rx_logos_set;
+ u8 tx_accs;
+ bool tx_accs_set;
+ u8 tx_prlis;
+ bool tx_prlis_set;
+ u8 rx_accs;
+ bool rx_accs_set;
+ u8 tx_abts;
+ bool tx_abts_set;
+ u8 rx_abts_acc;
+ bool rx_abts_acc_set;
+ u8 rx_abts_rjt;
+ bool rx_abts_rjt_set;
+ u32 abts_dst_fcid[5];
+ bool abts_dst_fcid_set[5];
+ u8 abts_tstamp[5][14];
+ bool abts_tstamp_set[5];
+ u8 rx_rscn;
+ bool rx_rscn_set;
+ u32 rx_rscn_nport[4];
+ bool rx_rscn_nport_set[4];
+ u8 tx_lun_rst;
+ bool tx_lun_rst_set;
+ u8 abort_task_sets;
+ bool abort_task_sets_set;
+ u8 tx_tprlos;
+ bool tx_tprlos_set;
+ u8 tx_nos;
+ bool tx_nos_set;
+ u8 rx_nos;
+ bool rx_nos_set;
+ u8 ols;
+ bool ols_set;
+ u8 lr;
+ bool lr_set;
+ u8 lrr;
+ bool lrr_set;
+ u8 tx_lip;
+ bool tx_lip_set;
+ u8 rx_lip;
+ bool rx_lip_set;
+ u8 eofa;
+ bool eofa_set;
+ u8 eofni;
+ bool eofni_set;
+ u8 scsi_chks;
+ bool scsi_chks_set;
+ u8 scsi_cond_met;
+ bool scsi_cond_met_set;
+ u8 scsi_busy;
+ bool scsi_busy_set;
+ u8 scsi_inter;
+ bool scsi_inter_set;
+ u8 scsi_inter_cond_met;
+ bool scsi_inter_cond_met_set;
+ u8 scsi_rsv_conflicts;
+ bool scsi_rsv_conflicts_set;
+ u8 scsi_tsk_full;
+ bool scsi_tsk_full_set;
+ u8 scsi_aca_active;
+ bool scsi_aca_active_set;
+ u8 scsi_tsk_abort;
+ bool scsi_tsk_abort_set;
+ u32 scsi_rx_chk[5];
+ bool scsi_rx_chk_set[5];
+ u8 scsi_chk_tstamp[5][14];
+ bool scsi_chk_tstamp_set[5];
+};
+
+struct ecore_mfw_tlv_iscsi {
+ u8 target_llmnr;
+ bool target_llmnr_set;
+ u8 header_digest;
+ bool header_digest_set;
+ u8 data_digest;
+ bool data_digest_set;
+ u8 auth_method;
+ bool auth_method_set;
+ u16 boot_taget_portal;
+ bool boot_taget_portal_set;
+ u16 frame_size;
+ bool frame_size_set;
+ u16 tx_desc_size;
+ bool tx_desc_size_set;
+ u16 rx_desc_size;
+ bool rx_desc_size_set;
+ u8 boot_progress;
+ bool boot_progress_set;
+ u16 tx_desc_qdepth;
+ bool tx_desc_qdepth_set;
+ u16 rx_desc_qdepth;
+ bool rx_desc_qdepth_set;
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union ecore_mfw_tlv_data {
+ struct ecore_mfw_tlv_generic generic;
+ struct ecore_mfw_tlv_eth eth;
+ struct ecore_mfw_tlv_fcoe fcoe;
+ struct ecore_mfw_tlv_iscsi iscsi;
+};
+
/**
* @brief - returns the link params of the hw function
*
@@ -452,7 +740,6 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
*
* @param p_hwfn
* @param p_ptt
- * @param config - Configuation that has been updated
* @param client - ecore client type
*
* @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
@@ -460,7 +747,6 @@ enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
enum _ecore_status_t
ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- enum ecore_ov_config_method config,
enum ecore_ov_client client);
/**
@@ -792,14 +1078,49 @@ enum _ecore_status_t ecore_mcp_mem_ecc_events(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u64 *num_events);
+struct ecore_mdump_info {
+ u32 reason;
+ u32 version;
+ u32 config;
+ u32 epoch;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+/**
+ * @brief - Gets the MFW crash dump configuration and logs info.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mdump_info
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t
+ecore_mcp_mdump_get_info(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mdump_info *p_mdump_info);
+
/**
- * @brief Sets whether a critical error notification from the MFW is acked, or
- * is it being ignored and thus allowing the MFW crash dump.
+ * @brief - Clears the MFW crash dump logs.
*
- * @param p_dev
- * @param mdump_enable
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return ECORE_SUCCESS upon success.
+ */
+enum _ecore_status_t ecore_mcp_mdump_clear_logs(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief - Processes a TLV request from the MFW, i.e. gets the required TLV
+ * info from the ecore client and sends it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
*
+ * @return ECORE_SUCCESS upon success.
*/
-void ecore_mcp_mdump_enable(struct ecore_dev *p_dev, bool mdump_enable);
+enum _ecore_status_t ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
#endif
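
Again for illustration only (not part of the patch), a sketch of how an ecore client might use the new mdump API declared above: query the crash-dump info, report it, and clear the logs; the function name example_handle_mdump() is hypothetical:

static void example_handle_mdump(struct ecore_hwfn *p_hwfn,
				 struct ecore_ptt *p_ptt)
{
	struct ecore_mdump_info mdump_info;

	if (ecore_mcp_mdump_get_info(p_hwfn, p_ptt, &mdump_info) !=
	    ECORE_SUCCESS)
		return;

	if (mdump_info.num_of_logs) {
		DP_NOTICE(p_hwfn, false,
			  "mdump: reason %u, %u valid logs captured\n",
			  mdump_info.reason, mdump_info.valid_logs);

		/* Clear the logs once they have been reported so the next
		 * crash dump can be captured.
		 */
		(void)ecore_mcp_mdump_clear_logs(p_hwfn, p_ptt);
	}
}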
diff --git a/drivers/net/qede/base/ecore_mng_tlv.c b/drivers/net/qede/base/ecore_mng_tlv.c
new file mode 100644
index 00000000..0bf1be88
--- /dev/null
+++ b/drivers/net/qede/base/ecore_mng_tlv.c
@@ -0,0 +1,1535 @@
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_mcp.h"
+#include "ecore_hw.h"
+#include "reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
+static enum _ecore_status_t
+ecore_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= ECORE_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ *tlv_group |= ECORE_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= ECORE_MFW_TLV_ISCSI;
+ break;
+ default:
+ return ECORE_INVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int
+ecore_mfw_get_gen_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_generic *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->feat_flags_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->feat_flags;
+ return sizeof(p_drv_buf->feat_flags);
+ }
+ break;
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ if (p_drv_buf->local_mac_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->local_mac;
+ return sizeof(p_drv_buf->local_mac);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ if (p_drv_buf->additional_mac1_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac1;
+ return sizeof(p_drv_buf->additional_mac1);
+ }
+ break;
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ if (p_drv_buf->additional_mac2_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->additional_mac2;
+ return sizeof(p_drv_buf->additional_mac2);
+ }
+ break;
+ case DRV_TLV_OS_DRIVER_STATES:
+ if (p_drv_buf->drv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->drv_state;
+ return sizeof(p_drv_buf->drv_state);
+ }
+ break;
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ if (p_drv_buf->pxe_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->pxe_progress;
+ return sizeof(p_drv_buf->pxe_progress);
+ }
+ break;
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_eth_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_eth *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_fcoe_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_fcoe *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[0];
+ return sizeof(p_drv_buf->crc_err_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[1];
+ return sizeof(p_drv_buf->crc_err_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[2];
+ return sizeof(p_drv_buf->crc_err_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[3];
+ return sizeof(p_drv_buf->crc_err_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->crc_err_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_src_fcid[4];
+ return sizeof(p_drv_buf->crc_err_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[0];
+ return sizeof(p_drv_buf->crc_err_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[1];
+ return sizeof(p_drv_buf->crc_err_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[2];
+ return sizeof(p_drv_buf->crc_err_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[3];
+ return sizeof(p_drv_buf->crc_err_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ if (p_drv_buf->crc_err_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->crc_err_tstamp[4];
+ return sizeof(p_drv_buf->crc_err_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[0];
+ return sizeof(p_drv_buf->flogi_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[1];
+ return sizeof(p_drv_buf->flogi_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[2];
+ return sizeof(p_drv_buf->flogi_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_param[3];
+ return sizeof(p_drv_buf->flogi_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ if (p_drv_buf->flogi_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_tstamp;
+ return sizeof(p_drv_buf->flogi_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ if (p_drv_buf->flogi_acc_param_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[0];
+ return sizeof(p_drv_buf->flogi_acc_param[0]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ if (p_drv_buf->flogi_acc_param_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[1];
+ return sizeof(p_drv_buf->flogi_acc_param[1]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ if (p_drv_buf->flogi_acc_param_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[2];
+ return sizeof(p_drv_buf->flogi_acc_param[2]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ if (p_drv_buf->flogi_acc_param_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_param[3];
+ return sizeof(p_drv_buf->flogi_acc_param[3]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ if (p_drv_buf->flogi_acc_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_acc_tstamp;
+ return sizeof(p_drv_buf->flogi_acc_tstamp);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ if (p_drv_buf->flogi_rjt_tstamp_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->flogi_rjt_tstamp;
+ return sizeof(p_drv_buf->flogi_rjt_tstamp);
+ }
+ break;
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[0];
+ return sizeof(p_drv_buf->plogi_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[1];
+ return sizeof(p_drv_buf->plogi_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[2];
+ return sizeof(p_drv_buf->plogi_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[3];
+ return sizeof(p_drv_buf->plogi_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->plogi_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_dst_fcid[4];
+ return sizeof(p_drv_buf->plogi_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[0];
+ return sizeof(p_drv_buf->plogi_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[1];
+ return sizeof(p_drv_buf->plogi_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[2];
+ return sizeof(p_drv_buf->plogi_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[3];
+ return sizeof(p_drv_buf->plogi_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ if (p_drv_buf->plogi_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_tstamp[4];
+ return sizeof(p_drv_buf->plogi_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[0];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[1];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[2];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[3];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogi_acc_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_src_fcid[4];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[0];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[1];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[2];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[3];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ if (p_drv_buf->plogi_acc_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogi_acc_tstamp[4];
+ return sizeof(p_drv_buf->plogi_acc_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[0];
+ return sizeof(p_drv_buf->plogo_src_fcid[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[1];
+ return sizeof(p_drv_buf->plogo_src_fcid[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[2];
+ return sizeof(p_drv_buf->plogo_src_fcid[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[3];
+ return sizeof(p_drv_buf->plogo_src_fcid[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ if (p_drv_buf->plogo_src_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_src_fcid[4];
+ return sizeof(p_drv_buf->plogo_src_fcid[4]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[0];
+ return sizeof(p_drv_buf->plogo_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[1];
+ return sizeof(p_drv_buf->plogo_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[2];
+ return sizeof(p_drv_buf->plogo_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[3];
+ return sizeof(p_drv_buf->plogo_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ if (p_drv_buf->plogo_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->plogo_tstamp[4];
+ return sizeof(p_drv_buf->plogo_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[0];
+ return sizeof(p_drv_buf->abts_dst_fcid[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[1];
+ return sizeof(p_drv_buf->abts_dst_fcid[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[2];
+ return sizeof(p_drv_buf->abts_dst_fcid[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[3];
+ return sizeof(p_drv_buf->abts_dst_fcid[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ if (p_drv_buf->abts_dst_fcid_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_dst_fcid[4];
+ return sizeof(p_drv_buf->abts_dst_fcid[4]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[0];
+ return sizeof(p_drv_buf->abts_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[1];
+ return sizeof(p_drv_buf->abts_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[2];
+ return sizeof(p_drv_buf->abts_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[3];
+ return sizeof(p_drv_buf->abts_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ if (p_drv_buf->abts_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abts_tstamp[4];
+ return sizeof(p_drv_buf->abts_tstamp[4]);
+ }
+ break;
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ if (p_drv_buf->rx_rscn_nport_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[0];
+ return sizeof(p_drv_buf->rx_rscn_nport[0]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ if (p_drv_buf->rx_rscn_nport_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[1];
+ return sizeof(p_drv_buf->rx_rscn_nport[1]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ if (p_drv_buf->rx_rscn_nport_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[2];
+ return sizeof(p_drv_buf->rx_rscn_nport[2]);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ if (p_drv_buf->rx_rscn_nport_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_rscn_nport[3];
+ return sizeof(p_drv_buf->rx_rscn_nport[3]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[0];
+ return sizeof(p_drv_buf->scsi_rx_chk[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[1];
+ return sizeof(p_drv_buf->scsi_rx_chk[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[2];
+ return sizeof(p_drv_buf->scsi_rx_chk[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[3];
+ return sizeof(p_drv_buf->scsi_rx_chk[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ if (p_drv_buf->scsi_rx_chk_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_rx_chk[4];
+ return sizeof(p_drv_buf->scsi_rx_chk[4]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[0]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[0];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[0]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[1]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[1];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[1]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[2]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[2];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[2]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[3]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[3];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[3]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ if (p_drv_buf->scsi_chk_tstamp_set[4]) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->scsi_chk_tstamp[4];
+ return sizeof(p_drv_buf->scsi_chk_tstamp[4]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+ecore_mfw_get_iscsi_tlv_value(struct ecore_drv_tlv_hdr *p_tlv,
+ struct ecore_mfw_tlv_iscsi *p_drv_buf,
+ u8 **p_tlv_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ *p_tlv_buf = (u8 *)&p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
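Each per-protocol getter above (the FCoE and iSCSI ones here, and the generic/eth ones referenced below) follows the same pattern: when the matching *_set flag in the driver-supplied buffer is non-zero, the getter points *p_tlv_buf at the corresponding field and returns that field's size in bytes; otherwise it returns -1 so the caller leaves that TLV untouched. A minimal stand-alone sketch of the pattern, using hypothetical names (my_tlv_buf, foo, foo_set) that are not part of the driver:

struct my_tlv_buf {
	unsigned int foo;	/* the value to report */
	unsigned char foo_set;	/* non-zero once 'foo' has been filled in */
};

static int get_foo_tlv_value(struct my_tlv_buf *buf, unsigned char **pp_out)
{
	if (buf->foo_set) {
		*pp_out = (unsigned char *)&buf->foo;
		return sizeof(buf->foo);	/* length in bytes */
	}
	return -1;	/* value not available; caller skips this TLV */
}
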
+static enum _ecore_status_t
+ecore_mfw_update_tlvs(u8 tlv_group, struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u8 *p_mfw_buf, u32 size)
+{
+ union ecore_mfw_tlv_data *p_tlv_data;
+ struct ecore_drv_tlv_hdr tlv;
+ u8 *p_tlv_ptr = OSAL_NULL, *p_temp;
+ u32 offset;
+ int len;
+
+ p_tlv_data = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return ECORE_NOMEM;
+
+ if (OSAL_MFW_FILL_TLV_DATA(p_hwfn, tlv_group, p_tlv_data)) {
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+ return ECORE_INVAL;
+ }
+
+ offset = 0;
+ OSAL_MEMSET(&tlv, 0, sizeof(tlv));
+ while (offset < size) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ tlv.tlv_flags = TLV_FLAGS(p_temp);
+ DP_INFO(p_hwfn, "Type %d length = %d flags = 0x%x\n",
+ tlv.tlv_type, tlv.tlv_length, tlv.tlv_flags);
+
+ offset += sizeof(tlv);
+ if (tlv_group == ECORE_MFW_TLV_GENERIC)
+ len = ecore_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_ETH)
+ len = ecore_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth, &p_tlv_ptr);
+ else if (tlv_group == ECORE_MFW_TLV_FCOE)
+ len = ecore_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe, &p_tlv_ptr);
+ else
+ len = ecore_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi, &p_tlv_ptr);
+
+ if (len > 0) {
+ OSAL_WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length");
+ len = OSAL_MIN_T(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= ECORE_DRV_TLV_FLAGS_CHANGED;
+ /* TODO: Endianness handling? */
+ OSAL_MEMCPY(p_mfw_buf, &tlv, sizeof(tlv));
+ OSAL_MEMCPY(p_mfw_buf + offset, p_tlv_ptr, len);
+ }
+
+ offset += sizeof(u32) * tlv.tlv_length;
+ }
+
+ OSAL_VFREE(p_hwfn->p_dev, p_tlv_data);
+
+ return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t
+ecore_mfw_process_tlv_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val;
+ u8 tlv_group = 0, id, *p_mfw_buf = OSAL_NULL, *p_temp;
+ u32 global_offsize, global_addr;
+ enum _ecore_status_t rc;
+ struct ecore_drv_tlv_hdr tlv;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = ecore_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + OFFSETOF(struct public_global, data_ptr);
+ size = ecore_rd(p_hwfn, p_ptt, global_addr +
+ OFFSETOF(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, false, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = (void *)OSAL_VZALLOC(p_hwfn->p_dev, size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, false,
+ "Failed to allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request to local buffer */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = ecore_rd(p_hwfn, p_ptt, addr + offset);
+ OSAL_MEMCPY(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (ecore_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ goto drv_done;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = ECORE_MFW_TLV_GENERIC; id < ECORE_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id) {
+ if (ecore_mfw_update_tlvs(id, p_hwfn, p_ptt, p_mfw_buf,
+ size))
+ goto drv_done;
+ }
+ }
+
+ /* Write the TLV data to shared memory */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ OSAL_MEMCPY(&val, &p_mfw_buf[offset], sizeof(u32));
+ ecore_wr(p_hwfn, p_ptt, addr + offset, val);
+ }
+
+drv_done:
+ rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ OSAL_VFREE(p_hwfn->p_dev, p_mfw_buf);
+
+ return rc;
+}
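ecore_mfw_process_tlv_req() above reads the MFW request buffer out of shared memory a dword at a time, walks it as a sequence of TLV headers to find out which groups were requested, fills the values in via ecore_mfw_update_tlvs(), and writes the buffer back. A hedged, stand-alone sketch of that header walk follows; the 3-byte header layout assumed here (type, payload length in dwords, flags) is an illustration only, the real layout is defined by the MFW HSI and accessed through the TLV_TYPE/TLV_LENGTH/TLV_FLAGS macros.

#include <stdint.h>
#include <stdio.h>

/* Assumed TLV header for illustration: type, length in dwords, flags. */
struct tlv_hdr {
	uint8_t type;
	uint8_t length;
	uint8_t flags;
};

static void walk_tlv_buffer(const uint8_t *buf, uint32_t size)
{
	uint32_t offset = 0;

	while (offset + sizeof(struct tlv_hdr) <= size) {
		struct tlv_hdr tlv;

		tlv.type = buf[offset];
		tlv.length = buf[offset + 1];
		tlv.flags = buf[offset + 2];
		printf("type %u length %u flags 0x%x\n",
		       tlv.type, tlv.length, tlv.flags);

		/* skip the header plus the dword-sized payload */
		offset += sizeof(struct tlv_hdr) +
			  sizeof(uint32_t) * tlv.length;
	}
}
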
diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h
index e252d528..226e3d2a 100644
--- a/drivers/net/qede/base/ecore_proto_if.h
+++ b/drivers/net/qede/base/ecore_proto_if.h
@@ -21,6 +21,12 @@ struct ecore_eth_pf_params {
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
+
+ /* To enable arfs, a positive number needs to be set here prior to
+ * HW-init [as filters require allocated searcher ILT memory].
+ * This sets the maximal number of configured steering-filters.
+ */
+ u32 num_arfs_filters;
};
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
@@ -63,6 +69,12 @@ struct ecore_iscsi_pf_params {
u8 bdq_pbl_num_entries[2];
};
+enum ecore_rdma_protocol {
+ ECORE_RDMA_PROTOCOL_DEFAULT,
+ ECORE_RDMA_PROTOCOL_ROCE,
+ ECORE_RDMA_PROTOCOL_IWARP,
+};
+
struct ecore_rdma_pf_params {
/* Supplied to ECORE during resource allocation (may affect the ILT and
* the doorbell BAR).
@@ -76,6 +88,10 @@ struct ecore_rdma_pf_params {
/* Will allocate rate limiters to be used with QPs */
u8 enable_dcqcn;
+
+ /* TCP port number used for the iwarp traffic */
+ u16 iwarp_port;
+ enum ecore_rdma_protocol rdma_protocol;
};
struct ecore_pf_params {
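Per the num_arfs_filters comment above, a caller has to request the filter resources in its PF parameters before HW-init. A minimal hedged sketch under that assumption; only struct ecore_eth_pf_params and its num_arfs_filters field come from the hunk above, the helper name and the count of 64 are illustrative.

/* Hypothetical helper: reserve searcher ILT memory for aRFS steering
 * filters before slowpath/HW init.
 */
static void request_arfs_filters(struct ecore_eth_pf_params *p_eth_params)
{
	/* A positive value enables aRFS and bounds the number of
	 * steering filters; leaving it at 0 keeps aRFS disabled.
	 */
	p_eth_params->num_arfs_filters = 64;
}
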
diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h
index 01a29e31..846dc6d1 100644
--- a/drivers/net/qede/base/ecore_rt_defs.h
+++ b/drivers/net/qede/base/ecore_rt_defs.h
@@ -115,339 +115,338 @@
#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28716
#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29132
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29707
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29708
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29709
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29710
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29711
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29839
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29859
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29879
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29880
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29881
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29882
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29896
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29897
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29898
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29900
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29901
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29902
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29903
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29904
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29966
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29967
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29968
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29982
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29983
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29984
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29994
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29995
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29996
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29997
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29998
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 608
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29740
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29741
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29742
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29743
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29744
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29745
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29746
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29747
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29748
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29749
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29750
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29751
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29752
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29753
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29754
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29755
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29756
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29757
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29758
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29759
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29760
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29761
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29762
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29763
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29764
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29765
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29766
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29767
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29768
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29769
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29770
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29771
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29772
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29773
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29774
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29775
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29776
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29777
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29778
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29779
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29780
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29781
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29782
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29783
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29784
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29785
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29786
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29787
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29788
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29789
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29790
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29791
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29792
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29793
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29794
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29795
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29796
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29797
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29798
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29799
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29800
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29801
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29802
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29803
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29804
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29805
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29806
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29807
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29935
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29936
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29937
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29938
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29939
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29940
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29941
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29942
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29943
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29944
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29945
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29946
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29947
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29948
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29949
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29950
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29951
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29952
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29953
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29954
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29955
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29956
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29957
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29958
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29959
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29960
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29967
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29968
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29969
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29970
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29971
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29972
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29973
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29974
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29975
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29976
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29977
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29978
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29979
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29980
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29981
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29982
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29983
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29984
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29985
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29986
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29987
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29988
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29989
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29990
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29991
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29992
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29993
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29994
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29995
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29996
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29997
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29998
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29999
+#define QM_REG_PQTX2PF_38_RT_OFFSET 30000
+#define QM_REG_PQTX2PF_39_RT_OFFSET 30001
+#define QM_REG_PQTX2PF_40_RT_OFFSET 30002
+#define QM_REG_PQTX2PF_41_RT_OFFSET 30003
+#define QM_REG_PQTX2PF_42_RT_OFFSET 30004
+#define QM_REG_PQTX2PF_43_RT_OFFSET 30005
+#define QM_REG_PQTX2PF_44_RT_OFFSET 30006
+#define QM_REG_PQTX2PF_45_RT_OFFSET 30007
+#define QM_REG_PQTX2PF_46_RT_OFFSET 30008
+#define QM_REG_PQTX2PF_47_RT_OFFSET 30009
+#define QM_REG_PQTX2PF_48_RT_OFFSET 30010
+#define QM_REG_PQTX2PF_49_RT_OFFSET 30011
+#define QM_REG_PQTX2PF_50_RT_OFFSET 30012
+#define QM_REG_PQTX2PF_51_RT_OFFSET 30013
+#define QM_REG_PQTX2PF_52_RT_OFFSET 30014
+#define QM_REG_PQTX2PF_53_RT_OFFSET 30015
+#define QM_REG_PQTX2PF_54_RT_OFFSET 30016
+#define QM_REG_PQTX2PF_55_RT_OFFSET 30017
+#define QM_REG_PQTX2PF_56_RT_OFFSET 30018
+#define QM_REG_PQTX2PF_57_RT_OFFSET 30019
+#define QM_REG_PQTX2PF_58_RT_OFFSET 30020
+#define QM_REG_PQTX2PF_59_RT_OFFSET 30021
+#define QM_REG_PQTX2PF_60_RT_OFFSET 30022
+#define QM_REG_PQTX2PF_61_RT_OFFSET 30023
+#define QM_REG_PQTX2PF_62_RT_OFFSET 30024
+#define QM_REG_PQTX2PF_63_RT_OFFSET 30025
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 30026
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 30027
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 30028
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 30029
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 30030
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 30031
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 30032
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 30033
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 30034
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 30035
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 30036
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 30037
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 30038
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 30039
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 30040
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 30041
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 30042
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 30043
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 30044
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 30045
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 30046
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 30047
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 30048
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 30049
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 30050
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 30051
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 30052
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 30053
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 30054
#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30254
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30310
#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30510
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30566
#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30766
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30767
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30768
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30769
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30822
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30823
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30824
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30825
#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30785
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30841
#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30801
+#define QM_REG_RLPFCRD_RT_OFFSET 30857
#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30817
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30818
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30819
+#define QM_REG_RLPFENABLE_RT_OFFSET 30873
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30874
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30875
#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30835
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30891
#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30851
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31011
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31012
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31013
+#define QM_REG_WFQPFCRD_RT_OFFSET 30907
+#define QM_REG_WFQPFCRD_RT_SIZE 256
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31163
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31164
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31165
#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31525
+#define QM_REG_TXPQMAP_RT_OFFSET 31677
#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32037
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32189
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32549
+#define QM_REG_WFQVPCRD_RT_OFFSET 32701
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33061
+#define QM_REG_WFQVPMAP_RT_OFFSET 33213
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33573
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33735
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33736
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33737
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33738
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33739
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33740
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33725
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 320
+#define QM_REG_VOQCRDLINE_RT_OFFSET 34045
+#define QM_REG_VOQCRDLINE_RT_SIZE 36
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 34081
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 36
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34117
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34118
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34119
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34120
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34121
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34122
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34123
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34124
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33744
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34128
#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33748
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34132
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33752
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33753
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34136
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34137
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33785
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34169
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33801
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34185
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33817
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34201
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33833
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34217
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33849
-#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33850
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33851
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33852
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33853
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33859
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33860
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33861
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33862
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33863
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33864
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33865
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33866
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33867
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33868
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33869
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33870
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33871
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33872
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33873
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33874
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33875
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33876
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33877
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33878
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33879
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33880
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33881
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33882
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33883
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33884
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33885
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33886
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33887
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33888
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33889
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33890
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33891
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33892
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33893
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33894
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33895
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33896
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33897
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33898
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33899
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33900
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33901
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33902
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33903
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33904
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33905
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33906
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33907
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33908
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33909
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33910
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33911
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33912
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33913
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33914
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33915
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33916
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33917
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33918
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33919
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33920
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33921
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33922
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33923
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33924
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33925
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33926
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34233
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 34234
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34235
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34236
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34237
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34238
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34239
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34240
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34241
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34242
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34243
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34244
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34245
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34246
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34247
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34248
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34249
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34250
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34251
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34252
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34253
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34254
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34255
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34256
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34257
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34258
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34259
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34260
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34261
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34262
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34263
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34264
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34265
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34266
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34267
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34268
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34269
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34270
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34271
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34272
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34273
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34274
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34275
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34276
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34277
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34278
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34279
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34280
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34281
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34282
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34283
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34284
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34285
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34286
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34287
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34288
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34289
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34290
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34291
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34292
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34293
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34294
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34295
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34296
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34297
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34298
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34299
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34300
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34301
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34302
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34303
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34304
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34305
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34306
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34307
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34308
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34309
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34310
-#define RUNTIME_ARRAY_SIZE 33927
+#define RUNTIME_ARRAY_SIZE 34311
#endif /* __RT_DEFS_H__ */
diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h
index a4cb507f..c8e564f9 100644
--- a/drivers/net/qede/base/ecore_sp_api.h
+++ b/drivers/net/qede/base/ecore_sp_api.h
@@ -41,5 +41,24 @@ struct ecore_spq_comp_cb {
*/
enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
+/**
+ * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
+ * update Ramrod
+ *
+ * This ramrod is sent to update a tunneling configuration
+ * for a physical function (PF).
+ *
+ * @param p_hwfn
+ * @param p_tunn - pf update tunneling parameters
+ * @param comp_mode - completion mode
+ * @param p_comp_data - callback function
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t
+ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct ecore_spq_comp_cb *p_comp_data);
#endif
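For reference, a minimal caller-side sketch of the ecore_sp_pf_update_tunn_cfg() API documented above: it enables VXLAN classification and programs the VXLAN UDP destination port. The struct ecore_tunnel_info field names are taken from the hunks later in this series; the wrapper name and the choice of ECORE_SPQ_MODE_EBLOCK with no callback are assumptions for the sketch, not part of the patch.

/* Illustrative sketch only; error handling beyond the return code is elided. */
static enum _ecore_status_t example_enable_vxlan(struct ecore_hwfn *p_hwfn,
						 u16 udp_port)
{
	struct ecore_tunnel_info tunn;

	OSAL_MEMSET(&tunn, 0, sizeof(tunn));

	/* Only types with b_update_mode set are touched by the update */
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = true;
	tunn.vxlan.tun_cls = ECORE_TUNN_CLSS_MAC_VLAN;

	/* Ask the FW to also switch the VXLAN destination UDP port */
	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = udp_port;

	/* Blocking completion mode, no completion callback data */
	return ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}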
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index b3736a8c..8fd64d7a 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -22,6 +22,7 @@
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
+#include "ecore_vf.h"
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
struct ecore_spq_entry **pp_ent,
@@ -31,7 +32,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
{
u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
struct ecore_spq_entry *p_ent = OSAL_NULL;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ enum _ecore_status_t rc;
if (!pp_ent)
return ECORE_INVAL;
@@ -88,7 +89,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
return ECORE_SUCCESS;
}
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
switch (type) {
case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -107,224 +108,208 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
}
static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src,
+ bool b_pf_start)
{
- unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
- unsigned long update_mask = p_src->tunn_mode_update_mask;
- unsigned long tunn_mode = p_src->tunn_mode;
- unsigned long new_tunn_mode = 0;
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
- }
-
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
- p_src->tunn_mode = new_tunn_mode;
- return;
- }
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- } else {
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
- OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
- }
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
- p_src->tunn_mode = new_tunn_mode;
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
}
-static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_src,
- struct pf_update_tunnel_config *p_tunn_cfg)
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
{
- unsigned long tunn_mode = p_src->tunn_mode;
enum tunnel_clss type;
- ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
- p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
- p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port =
- OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+	/* @DPDK - typecast tunnel class */
+ type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+ type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+ struct ecore_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
- if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
- return;
- }
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
+ if (tun_type->b_mode_enabled)
+ *p_enable_tx_clas = 1;
+}
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+ struct ecore_tunn_update_type *tun_type,
+ u8 *p_update_port, __le16 *p_port,
+ struct ecore_tunn_update_udp_port *p_udp_port)
+{
+ __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
+ tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
+ }
+}
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+ p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
}
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
- unsigned long tunn_mode)
+ struct ecore_tunnel_info *p_tun)
{
- u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
- u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- l2gre_enable = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- ipgre_enable = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- vxlan_enable = 1;
+ ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
- ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
- ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+ ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
- if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn)
+{
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel hw config is not supported\n");
return;
+ }
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- l2geneve_enable = 1;
+ if (p_tunn->vxlan_port.b_update_port)
+ ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_port.port);
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- ipgeneve_enable = 1;
+ if (p_tunn->geneve_port.b_update_port)
+ ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_port.port);
- ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
- ipgeneve_enable);
+ ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
}
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_src,
+ struct ecore_tunnel_info *p_src,
struct pf_start_tunnel_config *p_tunn_cfg)
{
- unsigned long tunn_mode;
- enum tunnel_clss type;
-
- if (!p_src)
- return;
-
- tunn_mode = p_src->tunn_mode;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
- p_tunn_cfg->tunnel_clss_vxlan = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
- p_tunn_cfg->tunnel_clss_l2gre = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
- p_tunn_cfg->tunnel_clss_ipgre = type;
-
- if (p_src->update_vxlan_udp_port) {
- p_tunn_cfg->set_vxlan_udp_port_flg = 1;
- p_tunn_cfg->vxlan_udp_port =
- OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2gre = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgre = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_vxlan = 1;
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
- if (p_src->update_geneve_udp_port)
- DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
- p_src->update_geneve_udp_port = 0;
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf start config is not supported\n");
return;
}
- if (p_src->update_geneve_udp_port) {
- p_tunn_cfg->set_geneve_udp_port_flg = 1;
- p_tunn_cfg->geneve_udp_port =
- OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
- }
-
- if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_l2geneve = 1;
-
- if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
- p_tunn_cfg->tx_enable_ipgeneve = 1;
+ if (!p_src)
+ return;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
- p_tunn_cfg->tunnel_clss_l2geneve = type;
- type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
- p_tunn_cfg->tunnel_clss_ipgeneve = type;
+ ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+ ecore_set_tunn_cls_info(p_tun, p_src);
+ ecore_set_tunn_ports(p_tun, p_src);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tunn_cfg->tx_enable_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tunn_cfg->tx_enable_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tunn_cfg->tx_enable_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tunn_cfg->tx_enable_l2gre,
+ &p_tun->l2_gre);
+
+ __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tunn_cfg->tx_enable_ipgre,
+ &p_tun->ip_gre);
}
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch)
{
@@ -379,11 +364,11 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
- p_hwfn->p_eq->chain.pbl.p_phys_table);
+ p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
- p_hwfn->p_consq->chain.pbl.p_phys_table);
+ p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
&p_ramrod->tunnel_config);
@@ -419,11 +404,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
- if (p_tunn) {
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
- }
+ if (p_tunn)
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -498,7 +480,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum spq_mode comp_mode,
struct ecore_spq_comp_cb *p_comp_data)
{
@@ -506,6 +488,18 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
struct ecore_sp_init_data init_data;
enum _ecore_status_t rc = ECORE_NOTIMPL;
+ if (IS_VF(p_hwfn->p_dev))
+ return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+ DP_NOTICE(p_hwfn, true,
+ "A0 chip: tunnel pf update config is not supported\n");
+ return rc;
+ }
+
+ if (!p_tunn)
+ return ECORE_INVAL;
+
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -526,15 +520,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
if (rc != ECORE_SUCCESS)
return rc;
- if (p_tunn->update_vxlan_udp_port)
- ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->vxlan_udp_port);
- if (p_tunn->update_geneve_udp_port)
- ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
- p_tunn->geneve_udp_port);
-
- ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
- p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+ ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
return rc;
}
@@ -564,7 +550,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
struct ecore_spq_entry *p_ent = OSAL_NULL;
struct ecore_sp_init_data init_data;
- enum _ecore_status_t rc = ECORE_NOTIMPL;
+ enum _ecore_status_t rc;
/* Get SPQ entry */
OSAL_MEMSET(&init_data, 0, sizeof(init_data));
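A hedged caller-side sketch of the per-type update semantics introduced in ecore_sp_commands.c above: ecore_set_pf_update_tunn_mode() only overwrites a tunnel type whose b_update_mode flag is set (or on PF start), so the cached enable state in p_dev->tunnel survives for untouched types. Only field names visible in this diff are used; the function name is hypothetical.

/* Illustrative only: disable GRE while leaving VXLAN/GENEVE untouched. */
static void example_disable_gre(struct ecore_tunnel_info *p_tunn)
{
	OSAL_MEMSET(p_tunn, 0, sizeof(*p_tunn));

	p_tunn->l2_gre.b_update_mode = true;
	p_tunn->l2_gre.b_mode_enabled = false;

	p_tunn->ip_gre.b_update_mode = true;
	p_tunn->ip_gre.b_mode_enabled = false;

	/* vxlan/l2_geneve/ip_geneve keep b_update_mode == false, so their
	 * cached enable state is preserved by ecore_set_pf_update_tunn_mode().
	 */
}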
diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h
index 66c9a69b..33e31e42 100644
--- a/drivers/net/qede/base/ecore_sp_commands.h
+++ b/drivers/net/qede/base/ecore_sp_commands.h
@@ -68,32 +68,11 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
*/
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_start_params *p_tunn,
+ struct ecore_tunnel_info *p_tunn,
enum ecore_mf_mode mode,
bool allow_npar_tx_switch);
/**
- * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration
- * update Ramrod
- *
- * This ramrod is sent to update a tunneling configuration
- * for a physical function (PF).
- *
- * @param p_hwfn
- * @param p_tunn - pf update tunneling parameters
- * @param comp_mode - completion mode
- * @param p_comp_data - callback function
- *
- * @return enum _ecore_status_t
- */
-
-enum _ecore_status_t
-ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
- struct ecore_tunn_update_params *p_tunn,
- enum spq_mode comp_mode,
- struct ecore_spq_comp_cb *p_comp_data);
-
-/**
* @brief ecore_sp_pf_update - PF Function Update Ramrod
*
* This ramrod updates function-related parameters. Every parameter can be
diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index 0d744ddd..3c1d05b3 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -173,11 +173,10 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
struct ecore_spq *p_spq)
{
- u16 pq;
struct ecore_cxt_info cxt_info;
struct core_conn_context *p_cxt;
- union ecore_qm_pq_params pq_params;
enum _ecore_status_t rc;
+ u16 physical_q;
cxt_info.iid = p_spq->cid;
@@ -191,23 +190,26 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
- SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
- SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
- /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- * XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
- */
- SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ /* @@@TBD we zero the context until we have ilt_reset implemented. */
+ OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+ if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ * E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+ */
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ }
/* CDU validation - FIXME currently disabled */
/* QM physical queue */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = LB_TC;
- pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
- p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+ physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
p_cxt->xstorm_st_context.spq_base_lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
@@ -248,7 +250,8 @@ static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
/* make sure the SPQE is updated before the doorbell */
OSAL_WMB(p_hwfn->p_dev);
- DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
+ DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY),
+ *(u32 *)&db);
/* make sure doorbell is rang */
OSAL_WMB(p_hwfn->p_dev);
@@ -355,7 +358,7 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
return rc;
}
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
struct ecore_eq *p_eq;
@@ -364,7 +367,7 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
if (!p_eq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_eq'\n");
- return OSAL_NULL;
+ return ECORE_NOMEM;
}
/* Allocate and initialize EQ chain*/
@@ -373,34 +376,38 @@ struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
num_elem,
- sizeof(union event_ring_element), &p_eq->chain)) {
+ sizeof(union event_ring_element),
+ &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
goto eq_allocate_fail;
}
/* register EQ completion on the SP SB */
- ecore_int_register_cb(p_hwfn,
- ecore_eq_completion,
+ ecore_int_register_cb(p_hwfn, ecore_eq_completion,
p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
- return p_eq;
+ p_hwfn->p_eq = p_eq;
+ return ECORE_SUCCESS;
eq_allocate_fail:
- ecore_eq_free(p_hwfn, p_eq);
- return OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, p_eq);
+ return ECORE_NOMEM;
}
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
- ecore_chain_reset(&p_eq->chain);
+ ecore_chain_reset(&p_hwfn->p_eq->chain);
}
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
- if (!p_eq)
+ if (!p_hwfn->p_eq)
return;
- ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
- OSAL_FREE(p_hwfn->p_dev, p_eq);
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+ p_hwfn->p_eq = OSAL_NULL;
}
/***************************************************************************
@@ -501,10 +508,13 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
}
/* SPQ ring */
- if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
- ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
- /* N/A when the mode is SINGLE */
- sizeof(struct slow_path_element), &p_spq->chain)) {
+ if (ecore_chain_alloc(p_hwfn->p_dev,
+ ECORE_CHAIN_USE_TO_PRODUCE,
+ ECORE_CHAIN_MODE_SINGLE,
+ ECORE_CHAIN_CNT_TYPE_U16,
+ 0, /* N/A when the mode is SINGLE */
+ sizeof(struct slow_path_element),
+ &p_spq->chain, OSAL_NULL)) {
DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
goto spq_allocate_fail;
}
@@ -920,6 +930,9 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
if (found->comp_cb.function)
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+ "Got a completion without a callback function\n");
if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
(found->queue == &p_spq->unlimited_pending))
@@ -937,7 +950,7 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
return rc;
}
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
struct ecore_consq *p_consq;
@@ -947,7 +960,7 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
if (!p_consq) {
DP_NOTICE(p_hwfn, true,
"Failed to allocate `struct ecore_consq'\n");
- return OSAL_NULL;
+ return ECORE_NOMEM;
}
/* Allocate and initialize EQ chain */
@@ -956,27 +969,30 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
ECORE_CHAIN_MODE_PBL,
ECORE_CHAIN_CNT_TYPE_U16,
ECORE_CHAIN_PAGE_SIZE / 0x80,
- 0x80, &p_consq->chain)) {
+ 0x80,
+ &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
- return p_consq;
+ p_hwfn->p_consq = p_consq;
+ return ECORE_SUCCESS;
consq_allocate_fail:
- ecore_consq_free(p_hwfn, p_consq);
- return OSAL_NULL;
+ OSAL_FREE(p_hwfn->p_dev, p_consq);
+ return ECORE_NOMEM;
}
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
- ecore_chain_reset(&p_consq->chain);
+ ecore_chain_reset(&p_hwfn->p_consq->chain);
}
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
- if (!p_consq)
+ if (!p_hwfn->p_consq)
return;
- ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
- OSAL_FREE(p_hwfn->p_dev, p_consq);
+
+ ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+ OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}
diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h
index 717ede30..e530f834 100644
--- a/drivers/net/qede/base/ecore_spq.h
+++ b/drivers/net/qede/base/ecore_spq.h
@@ -26,6 +26,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -194,28 +195,23 @@ void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
* @param p_hwfn
* @param num_elem number of elements in the eq
*
- * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ * @return enum _ecore_status_t
*/
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn,
- u16 num_elem);
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief ecore_eq_setup - Reset the SPQ to its start state.
+ * @brief ecore_eq_setup - Reset the EQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_eq *p_eq);
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_eq_deallocate - deallocates the given EQ struct.
+ * @brief ecore_eq_free - deallocates the given EQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_eq_free(struct ecore_hwfn *p_hwfn,
- struct ecore_eq *p_eq);
+void ecore_eq_free(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_eq_prod_update - update the FW with default EQ producer
@@ -261,32 +257,26 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_consq_alloc - Allocates & initializes an ConsQ
- * struct
+ * @brief ecore_consq_alloc - Allocates & initializes a ConsQ struct
*
* @param p_hwfn
*
- * @return struct ecore_eq* - a newly allocated structure; NULL upon error.
+ * @return enum _ecore_status_t
*/
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn);
/**
- * @brief ecore_consq_setup - Reset the ConsQ to its start
- * state.
+ * @brief ecore_consq_setup - Reset the ConsQ to its start state.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn,
- struct ecore_consq *p_consq);
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn);
/**
* @brief ecore_consq_free - deallocates the given ConsQ struct.
*
* @param p_hwfn
- * @param p_eq
*/
-void ecore_consq_free(struct ecore_hwfn *p_hwfn,
- struct ecore_consq *p_consq);
+void ecore_consq_free(struct ecore_hwfn *p_hwfn);
#endif /* __ECORE_SPQ_H__ */
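A minimal sketch of the EQ/ConsQ lifecycle after the ecore_spq.h API change above, assuming a caller-chosen EQ size of 256 elements and an illustrative wrapper name; both objects are now stored on the hwfn and the allocators return a status instead of a pointer.

/* Illustrative sketch only; not part of the patch. */
static enum _ecore_status_t example_sp_queues_init(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc;

	rc = ecore_eq_alloc(p_hwfn, 256);	/* sets p_hwfn->p_eq */
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_consq_alloc(p_hwfn);		/* sets p_hwfn->p_consq */
	if (rc != ECORE_SUCCESS) {
		ecore_eq_free(p_hwfn);
		return rc;
	}

	/* Reset both chains to their start state before first use */
	ecore_eq_setup(p_hwfn);
	ecore_consq_setup(p_hwfn);

	return ECORE_SUCCESS;
}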
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index b28d7281..db2873e7 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -51,6 +51,8 @@ const char *ecore_channel_tlvs_string[] = {
"CHANNEL_TLV_VPORT_UPDATE_RSS",
"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+ "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+ "CHANNEL_TLV_COALESCE_UPDATE",
"CHANNEL_TLV_MAX"
};
@@ -86,6 +88,7 @@ static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_ETH;
break;
case ECORE_PCI_ETH_ROCE:
+ case ECORE_PCI_ETH_IWARP:
p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
break;
default:
@@ -146,7 +149,7 @@ static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
- bool b_enabled_only)
+ bool b_enabled_only, bool b_non_malicious)
{
if (!p_hwfn->pf_iov_info) {
DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
@@ -161,6 +164,10 @@ bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
b_enabled_only)
return false;
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
return true;
}
@@ -175,7 +182,8 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
return OSAL_NULL;
}
- if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
@@ -184,28 +192,90 @@ struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
return vf;
}
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ struct ecore_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return OSAL_NULL;
+}
+
+enum ecore_iov_validate_q_mode {
+ ECORE_IOV_VALIDATE_Q_NA,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u16 qid,
+ enum ecore_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ int i;
+
+ if (mode == ECORE_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct ecore_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (p_qcid->p_cid == OSAL_NULL)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ /* Found. It's enabled. */
+ return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+ }
+
+	/* In case we haven't found any valid cid, then it's disabled */
+ return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 rx_qid)
+ u16 rx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (rx_qid >= p_vf->num_rxqs)
+ if (rx_qid >= p_vf->num_rxqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Rx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
- return rx_qid < p_vf->num_rxqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
+ mode, false);
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf,
- u16 tx_qid)
+ u16 tx_qid,
+ enum ecore_iov_validate_q_mode mode)
{
- if (tx_qid >= p_vf->num_txqs)
+ if (tx_qid >= p_vf->num_txqs) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[0x%02x] - can't touch Tx queue[%04x];"
" Only 0x%04x are allocated\n",
p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
- return tx_qid < p_vf->num_txqs;
+ return false;
+ }
+
+ return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
+ mode, true);
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
@@ -226,6 +296,35 @@ static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
return false;
}
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
@@ -317,10 +416,9 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
- DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
+ DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
"ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
- " stride %d, page size 0x%x\n", 0,
- /* @@@TBD MichalK - function id */
+ " stride %d, page size 0x%x\n",
iov->nres, iov->cap, iov->ctrl,
iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
iov->offset, iov->stride, iov->pgsz);
@@ -395,8 +493,6 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
return;
}
- p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */
-
for (idx = 0; idx < p_iov->total_vfs; idx++) {
struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
u32 concrete;
@@ -425,8 +521,6 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
/* TODO - need to devise a better way of getting opaque */
vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
(vf->abs_vf_id << 8);
- /* @@TBD MichalK - add base vport_id of VFs to equation */
- vf->vport_id = p_iov_info->base_vport_id + idx;
vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
@@ -550,7 +644,6 @@ void ecore_iov_free(struct ecore_hwfn *p_hwfn)
void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
OSAL_FREE(p_dev, p_dev->p_iov_info);
- p_dev->p_iov_info = OSAL_NULL;
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
@@ -593,18 +686,33 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"IOV capabilities, but no VFs are published\n");
OSAL_FREE(p_dev, p_dev->p_iov_info);
- p_dev->p_iov_info = OSAL_NULL;
return ECORE_SUCCESS;
}
- /* Calculate the first VF index - this is a bit tricky; Basically,
- * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
- * after the first engine's VFs.
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+	 *   provide the number for eng0. 2nd engine VFs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on next device.
+ * so offset - (256 - pf_id) would provide the number.
+	 * Utilize the fact that (256 - pf_id) is achieved only by the latter
+	 * to differentiate between the two.
*/
- p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
- p_hwfn->abs_pf_id - 16;
- if (ECORE_PATH_ID(p_hwfn))
- p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+ if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+
+ if (ECORE_PATH_ID(p_hwfn))
+ p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->p_dev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ p_dev->p_iov_info->first_vf_in_pf = first;
+ }
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"First VF in hwfn 0x%08x\n",
@@ -613,7 +721,8 @@ enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
return ECORE_SUCCESS;
}
-bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+ bool b_fail_malicious)
{
/* Check PF supports sriov */
if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
@@ -621,12 +730,17 @@ bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
return false;
/* Check VF validity */
- if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true))
+ if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
return false;
return true;
}
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+ return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
u16 rel_vf_id, u8 to_disable)
{
@@ -747,6 +861,9 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+ /* It's possible VF was previously considered malicious */
+ vf->b_malicious = false;
+
rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
vf->abs_vf_id, vf->num_sbs);
if (rc != ECORE_SUCCESS)
@@ -901,17 +1018,59 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
vf->num_sbs = 0;
}
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
- struct ecore_ptt *p_ptt,
- u16 rel_vf_id, u16 num_rx_queues)
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+ u16 vfid,
+ struct ecore_mcp_link_params *params,
+ struct ecore_mcp_link_state *link,
+ struct ecore_mcp_link_capabilities *p_caps)
+{
+ struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+ struct ecore_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_iov_vf_init_params *p_params)
{
+ struct ecore_mcp_link_capabilities link_caps;
+ struct ecore_mcp_link_params link_params;
+ struct ecore_mcp_link_state link_state;
u8 num_of_vf_available_chains = 0;
struct ecore_vf_info *vf = OSAL_NULL;
+ u16 qid, num_irqs;
enum _ecore_status_t rc = ECORE_SUCCESS;
u32 cids;
u8 i;
- vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
return ECORE_UNKNOWN_ERROR;
@@ -919,22 +1078,80 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
if (vf->b_init) {
DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
- rel_vf_id);
+ p_params->rel_vf_id);
+ return ECORE_INVAL;
+ }
+
+ /* Perform sanity checking on the requested vport/rss */
+ if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+ p_params->rel_vf_id, p_params->vport_id);
+ return ECORE_INVAL;
+ }
+
+ if ((p_params->num_queues > 1) &&
+ (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+ DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+ p_params->rel_vf_id, p_params->rss_eng_id);
return ECORE_INVAL;
}
+ /* TODO - remove this once we get confidence of change */
+ if (!p_params->vport_id) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+ DP_NOTICE(p_hwfn, false,
+ "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+ p_params->rel_vf_id);
+ }
+ vf->vport_id = p_params->vport_id;
+ vf->rss_eng_id = p_params->rss_eng_id;
+
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
+
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid, p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn, true,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return ECORE_INVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
/* Limit number of queues according to number of CIDs */
ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF[%d] - requesting to initialize for 0x%04x queues"
" [0x%04x CIDs available]\n",
- vf->relative_vf_id, num_rx_queues, (u16)cids);
- num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
p_ptt,
vf,
- num_rx_queues);
+ num_irqs);
if (num_of_vf_available_chains == 0) {
DP_ERR(p_hwfn, "no available igu sbs\n");
return ECORE_NOMEM;
@@ -945,28 +1162,28 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
vf->num_txqs = num_of_vf_available_chains;
for (i = 0; i < vf->num_rxqs; i++) {
- u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
- vf->igu_sbs[i]);
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
- if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
- DP_NOTICE(p_hwfn, true,
- "VF[%d] will require utilizing of"
- " out-of-bounds queues - %04x\n",
- vf->relative_vf_id, queue_id);
- /* TODO - cleanup the already allocate SBs */
- return ECORE_INVAL;
- }
-
- /* CIDs are per-VF, so no problem having them 0-based. */
- vf->vf_queues[i].fw_rx_qid = queue_id;
- vf->vf_queues[i].fw_tx_qid = queue_id;
- vf->vf_queues[i].fw_cid = i;
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
- vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
}
+ /* Update the link configuration in bulletin.
+ */
+ OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+ sizeof(link_state));
+ OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
if (rc == ECORE_SUCCESS) {
@@ -981,43 +1198,6 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
return rc;
}
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
- u16 vfid,
- struct ecore_mcp_link_params *params,
- struct ecore_mcp_link_state *link,
- struct ecore_mcp_link_capabilities *p_caps)
-{
- struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
- struct ecore_bulletin_content *p_bulletin;
-
- if (!p_vf)
- return;
-
- p_bulletin = p_vf->bulletin.p_virt;
- p_bulletin->req_autoneg = params->speed.autoneg;
- p_bulletin->req_adv_speed = params->speed.advertised_speeds;
- p_bulletin->req_forced_speed = params->speed.forced_speed;
- p_bulletin->req_autoneg_pause = params->pause.autoneg;
- p_bulletin->req_forced_rx = params->pause.forced_rx;
- p_bulletin->req_forced_tx = params->pause.forced_tx;
- p_bulletin->req_loopback = params->loopback_mode;
-
- p_bulletin->link_up = link->link_up;
- p_bulletin->speed = link->speed;
- p_bulletin->full_duplex = link->full_duplex;
- p_bulletin->autoneg = link->an;
- p_bulletin->autoneg_complete = link->an_complete;
- p_bulletin->parallel_detection = link->parallel_detection;
- p_bulletin->pfc_enabled = link->pfc_enabled;
- p_bulletin->partner_adv_speed = link->partner_adv_speed;
- p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
- p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
- p_bulletin->partner_adv_pause = link->partner_adv_pause;
- p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
- p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
u16 rel_vf_id)
@@ -1326,7 +1506,7 @@ struct ecore_public_vf_info
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf)
{
- u32 i;
+ u32 i, j;
p_vf->vf_bulletin = 0;
p_vf->vport_instance = 0;
p_vf->configured_features = 0;
@@ -1337,8 +1517,18 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
p_vf->num_active_rxqs = 0;
- for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
- p_vf->vf_queues[i].rxq_active = 0;
+ for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ ecore_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = OSAL_NULL;
+ }
+ }
OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
@@ -1351,7 +1541,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
struct vf_pf_resc_request *p_req,
struct pf_vf_resc *p_resp)
{
- int i;
+ u8 i;
/* Queue related information */
p_resp->num_rxqs = p_vf->num_rxqs;
@@ -1372,7 +1562,7 @@ static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
for (i = 0; i < p_resp->num_rxqs; i++) {
ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
(u16 *)&p_resp->hw_qid[i]);
- p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ p_resp->cid[i] = i;
}
/* Filter related information */
@@ -1460,6 +1650,18 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+ /* TODO - not doing anything is bad since we'll assert, but this isn't
+ * necessarily the right behavior - perhaps we should have allowed some
+ * versatility here.
+ */
+ if (vf->state != VF_FREE &&
+ vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
/* Validate FW compatibility */
if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
if (req->vfdev_info.capabilities &
@@ -1575,12 +1777,12 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
"VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
" db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
"resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
- " n_vlans-%d, n_mcs-%d\n",
+ " n_vlans-%d\n",
vf->abs_vf_id, resp->pfdev_info.chip_num,
resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
(unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
- resc->num_vlan_filters, resc->num_mc_filters);
+ resc->num_vlan_filters);
vf->state = VF_ACQUIRED;
@@ -1650,11 +1852,9 @@ ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
filter.vlan, p_vf->relative_vf_id);
- rc = ecore_sp_eth_filter_ucast(p_hwfn,
- p_vf->opaque_fid,
- &filter,
- ECORE_SPQ_MODE_CB,
- OSAL_NULL);
+ rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, ECORE_SPQ_MODE_CB,
+ OSAL_NULL);
if (rc) {
DP_NOTICE(p_hwfn, true,
"Failed to configure VLAN [%04x]"
@@ -1682,9 +1882,10 @@ ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
return rc;
}
-static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
- struct ecore_vf_info *p_vf,
- u64 events)
+static enum _ecore_status_t
+ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+ struct ecore_vf_info *p_vf,
+ u64 events)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
struct ecore_filter_ucast filter;
@@ -1764,14 +1965,17 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
/* Update all the Rx queues */
for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
- u16 qid;
+ struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct ecore_queue_cid *p_cid = OSAL_NULL;
- if (!p_vf->vf_queues[i].rxq_active)
+ /* There can be at most 1 Rx queue on qzone. Find it */
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
+ p_queue);
+ if (p_cid == OSAL_NULL)
continue;
- qid = p_vf->vf_queues[i].fw_rx_qid;
-
- rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
1, 0, 1,
ECORE_SPQ_MODE_EBLOCK,
OSAL_NULL);
@@ -1779,7 +1983,7 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, true,
"Failed to send Rx update"
" fo queue[0x%04x]\n",
- qid);
+ p_cid->rel.queue_id);
return rc;
}
}
@@ -1823,6 +2027,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
vf->state = VF_ENABLED;
start = &mbx->req_virt->start_vport;
+ ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
/* Initialize Status block in CAU */
for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
if (!start->sb_addr[sb_id]) {
@@ -1837,7 +2043,6 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
vf->igu_sbs[sb_id],
vf->abs_vf_id, 1);
}
- ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
vf->mtu = start->mtu;
vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1904,6 +2109,15 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
vf->vport_instance--;
vf->spoof_chk = false;
+ if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (ecore_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn, false,
+ "VF [%02x] - considered malicious;"
+			  " Unable to stop RX/TX queues\n",
+ vf->abs_vf_id);
+ }
+
rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc != ECORE_SUCCESS) {
DP_ERR(p_hwfn,
@@ -1961,63 +2175,242 @@ static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_queue_start_common_params p_params;
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_rxq_tlv *req;
+ struct ecore_queue_cid *p_cid;
bool b_legacy_vf = false;
+ u8 qid_usage_idx;
enum _ecore_status_t rc;
req = &mbx->req_virt->start_rxq;
- OSAL_MEMSET(&p_params, 0, sizeof(p_params));
- p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
- p_params.vf_qid = req->rx_qid;
- p_params.vport_id = vf->vport_id;
- p_params.stats_id = vf->abs_vf_id + 0x10,
- p_params.sb = req->hw_sb;
- p_params.sb_idx = req->sb_index;
-
- if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ ECORE_IOV_VALIDATE_Q_DISABLE) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- /* Legacy VFs have their Producers in a different location, which they
- * calculate on their own and clean the producer prior to this.
+ /* Legacy VFs made assumptions on the CID their queues connected to,
+ * assuming queue X used CID X.
+ * TODO - need to validate that there was no official release post
+ * the current legacy scheme that still made that assumption.
*/
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN)
b_legacy_vf = true;
- else
+
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->rx_qid];
+
+ OSAL_MEMSET(&params, 0, sizeof(params));
+ params.queue_id = (u8)p_queue->fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '0' for Rx.
+ */
+ qid_usage_idx = 0;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ /* Legacy VFs have their Producers in a different location, which they
+ * calculate on their own and clean the producer prior to this.
+ */
+ if (!b_legacy_vf)
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
- rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
- vf->vf_queues[req->rx_qid].fw_cid,
- &p_params,
- req->bd_max_bytes,
- req->rxq_addr,
- req->cqe_pbl_addr,
- req->cqe_pbl_size,
- b_legacy_vf);
-
- if (rc) {
+ rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr,
+ req->cqe_pbl_size);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
} else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->rx_qid].rxq_active = true;
vf->num_active_rxqs++;
}
out:
- ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
- status, b_legacy_vf);
+ ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ b_legacy_vf);
+}
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct ecore_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ enum ecore_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & (1 << mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & (1 << mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_tun,
+ struct ecore_tunn_update_udp_port *p_port,
+ enum ecore_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *p_vf)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct ecore_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ ECORE_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ ECORE_MODE_L2GRE_TUNN,
+ p_req->l2gre_clss);
+ __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ ECORE_MODE_IPGRE_TUNN,
+ p_req->ipgre_clss);
+
+	/* Even if the PF modifies the VF's request, it should still return
+	 * an error whenever the resulting configuration is partial or
+	 * differs from what was requested.
+	 */
+ rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+	/* Does the ECORE client want to update anything? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc != ECORE_SUCCESS)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ ecore_for_each_vf(p_hwfn, i) {
+ ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
}
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *p_vf,
+ u32 cid,
u8 status)
{
struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
@@ -2046,12 +2439,8 @@ static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
sizeof(struct channel_list_end_tlv));
/* Update the TLV with the response */
- if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
- u16 qid = mbx->req_virt->start_txq.tx_qid;
-
- p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
- DQ_DEMS_LEGACY);
- }
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
@@ -2060,48 +2449,80 @@ static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
- struct ecore_queue_start_common_params p_params;
+ struct ecore_queue_start_common_params params;
+ struct ecore_queue_cid_vf_params vf_params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
- union ecore_qm_pq_params pq_params;
+ struct ecore_vf_queue *p_queue;
struct vfpf_start_txq_tlv *req;
+ struct ecore_queue_cid *p_cid;
+ bool b_legacy_vf = false;
+ u8 qid_usage_idx;
+ u32 cid = 0;
enum _ecore_status_t rc;
+ u16 pq;
- /* Prepare the parameters which would choose the right PQ */
- OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
- pq_params.eth.is_vf = 1;
- pq_params.eth.vf_id = vf->relative_vf_id;
-
+ OSAL_MEMSET(&params, 0, sizeof(params));
req = &mbx->req_virt->start_txq;
- OSAL_MEMSET(&p_params, 0, sizeof(p_params));
- p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
- p_params.vport_id = vf->vport_id;
- p_params.stats_id = vf->abs_vf_id + 0x10,
- p_params.sb = req->hw_sb;
- p_params.sb_idx = req->sb_index;
-
- if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ ECORE_IOV_VALIDATE_Q_NA) ||
!ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
goto out;
- rc = ecore_sp_eth_txq_start_ramrod(
- p_hwfn,
- vf->opaque_fid,
- vf->vf_queues[req->tx_qid].fw_cid,
- &p_params,
- req->pbl_addr,
- req->pbl_size,
- &pq_params);
+ /* In case this is a legacy VF - need to know to use the right cids.
+ * TODO - need to validate that there was no official release post
+ * the current legacy scheme that still made that assumption.
+ */
+ if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy_vf = true;
- if (rc)
+ /* Acquire a new queue-cid */
+ p_queue = &vf->vf_queues[req->tx_qid];
+
+ params.queue_id = p_queue->fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ /* TODO - set qid_usage_idx according to extended TLV. For now, use
+ * '1' for Tx.
+ */
+ qid_usage_idx = 1;
+
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.b_legacy = b_legacy_vf;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, &vf_params);
+ if (p_cid == OSAL_NULL)
+ goto out;
+
+ pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+ vf->relative_vf_id);
+ rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc != ECORE_SUCCESS) {
status = PFVF_STATUS_FAILURE;
- else {
+ ecore_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
status = PFVF_STATUS_SUCCESS;
- vf->vf_queues[req->tx_qid].txq_active = true;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
}
out:
- ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+ ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+ cid, status);
}
static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
@@ -2111,22 +2532,37 @@ static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
bool cqe_completion)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
+ int qid, i;
+ /* TODO - improve validation [wrap around] */
if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
return ECORE_INVAL;
for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
- if (vf->vf_queues[qid].rxq_active) {
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_rx_qid, false,
- cqe_completion);
-
- if (rc)
- return rc;
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+ struct ecore_queue_cid **pp_cid = OSAL_NULL;
+
+ /* There can be at most a single Rx per qzone. Find it */
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid &&
+ !p_queue->cids[i].b_is_tx) {
+ pp_cid = &p_queue->cids[i].p_cid;
+ break;
+ }
}
- vf->vf_queues[qid].rxq_active = false;
+ if (pp_cid == OSAL_NULL) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+				   "Ignoring VF[%02x] request to close Rx queue %04x - already closed\n",
+ vf->relative_vf_id, qid);
+ continue;
+ }
+
+ rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
+ false, cqe_completion);
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ *pp_cid = OSAL_NULL;
vf->num_active_rxqs--;
}
@@ -2138,22 +2574,33 @@ static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
u16 txq_id, u8 num_txqs)
{
enum _ecore_status_t rc = ECORE_SUCCESS;
- int qid;
+ struct ecore_vf_queue *p_queue;
+ int qid, j;
- if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+ if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+ ECORE_IOV_VALIDATE_Q_NA) ||
+ !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
+ ECORE_IOV_VALIDATE_Q_NA))
return ECORE_INVAL;
for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
- if (vf->vf_queues[qid].txq_active) {
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- vf->vf_queues[qid].
- fw_tx_qid);
+ p_queue = &vf->vf_queues[qid];
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (p_queue->cids[j].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[j].b_is_tx)
+ continue;
- if (rc)
+ rc = ecore_eth_tx_queue_stop(p_hwfn,
+ p_queue->cids[j].p_cid);
+ if (rc != ECORE_SUCCESS)
return rc;
+
+ p_queue->cids[j].p_cid = OSAL_NULL;
}
- vf->vf_queues[qid].txq_active = false;
}
+
return rc;
}
@@ -2208,44 +2655,52 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
u16 length = sizeof(struct pfvf_def_resp_tlv);
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct vfpf_update_rxq_tlv *req;
- u8 status = PFVF_STATUS_SUCCESS;
+ u8 status = PFVF_STATUS_FAILURE;
u8 complete_event_flg;
u8 complete_cqe_flg;
- u16 qid;
enum _ecore_status_t rc;
- u8 i;
+ u16 i;
req = &mbx->req_virt->update_rxq;
complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
- for (i = 0; i < req->num_rxqs; i++) {
- qid = req->rx_qid + i;
-
- if (!vf->vf_queues[qid].rxq_active) {
- DP_NOTICE(p_hwfn, true,
- "VF rx_qid = %d isn`t active!\n", qid);
- status = PFVF_STATUS_FAILURE;
- break;
+ /* Validate inputs */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
}
+ }
- rc = ecore_sp_eth_rx_queues_update(p_hwfn,
- vf->vf_queues[qid].fw_rx_qid,
- 1,
- complete_cqe_flg,
- complete_event_flg,
- ECORE_SPQ_MODE_EBLOCK,
- OSAL_NULL);
+ for (i = 0; i < req->num_rxqs; i++) {
+ struct ecore_vf_queue *p_queue;
+ u16 qid = req->rx_qid + i;
- if (rc) {
- status = PFVF_STATUS_FAILURE;
- break;
- }
+ p_queue = &vf->vf_queues[qid];
+ handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ p_queue);
}
+ rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ ECORE_SPQ_MODE_EBLOCK,
+ OSAL_NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
length, status);
}
@@ -2422,12 +2877,14 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *vf,
struct ecore_sp_vport_update_params *p_data,
struct ecore_rss_params *p_rss,
- struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+ struct ecore_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
{
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
- u16 i, q_idx, max_q_idx;
+ bool b_reject = false;
u16 table_size;
+ u16 i, q_idx;
p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
@@ -2452,39 +2909,38 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
VFPF_UPDATE_RSS_KEY_FLAG);
p_rss->rss_enable = p_rss_tlv->rss_enable;
- p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_eng_id = vf->rss_eng_id;
p_rss->rss_caps = p_rss_tlv->rss_caps;
p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
- OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
- sizeof(p_rss->rss_ind_table));
OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
sizeof(p_rss->rss_key));
table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
(1 << p_rss_tlv->rss_table_size_log));
- max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
for (i = 0; i < table_size; i++) {
- u16 index = vf->vf_queues[0].fw_rx_qid;
+ struct ecore_queue_cid *p_cid;
- q_idx = p_rss->rss_ind_table[i];
- if (q_idx >= max_q_idx)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d,"
- " rxq is out of range\n",
- i, q_idx);
- else if (!vf->vf_queues[q_idx].rxq_active)
- DP_NOTICE(p_hwfn, true,
- "rss_ind_table[%d] = %d, rxq is not active\n",
- i, q_idx);
- else
- index = vf->vf_queues[q_idx].fw_rx_qid;
- p_rss->rss_ind_table[i] = index;
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+ ECORE_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
}
p_data->rss_params = p_rss;
+out:
*tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
}
static void
@@ -2540,11 +2996,11 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
struct ecore_vf_info *vf)
{
+ struct ecore_rss_params *p_rss_params = OSAL_NULL;
struct ecore_sp_vport_update_params params;
struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
struct ecore_sge_tpa_params sge_tpa_params;
u16 tlvs_mask = 0, tlvs_accepted = 0;
- struct ecore_rss_params rss_params;
u8 status = PFVF_STATUS_SUCCESS;
u16 length;
enum _ecore_status_t rc;
@@ -2559,6 +3015,12 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
goto out;
}
+ p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+ if (p_rss_params == OSAL_NULL) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
OSAL_MEMSET(&params, 0, sizeof(params));
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
@@ -2572,20 +3034,24 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
- ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
- mbx, &tlvs_mask);
ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
&sge_tpa_params, mbx, &tlvs_mask);
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; In that case,
+ * they can update the mask without updating the accepted [so that
+ * PF could communicate to VF it has rejected request].
+ */
+ ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
/* Just log a message if there is no single extended tlv in buffer.
* When all features of vport update ramrod would be requested by VF
* as extended TLVs in buffer then an error can be returned in response
* if there is no extended TLV present in buffer.
*/
- tlvs_accepted = tlvs_mask;
-
-#ifndef LINUX_REMOVE
if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
&params, &tlvs_accepted) !=
ECORE_SUCCESS) {
@@ -2593,7 +3059,6 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
status = PFVF_STATUS_NOT_SUPPORTED;
goto out;
}
-#endif
if (!tlvs_accepted) {
if (tlvs_mask)
@@ -2614,6 +3079,7 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
status = PFVF_STATUS_FAILURE;
out:
+ OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
tlvs_mask, tlvs_accepted);
ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
@@ -2916,6 +3382,88 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
length, status);
}
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_vf_info *vf)
+{
+ struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct ecore_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ u16 qid;
+ int i;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
+ if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ rx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+ ECORE_IOV_VALIDATE_Q_ENABLE) &&
+ tx_coal) {
+ DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
+ &vf->vf_queues[qid]);
+
+ rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ }
+
+ /* TODO - in future, it might be possible to pass this in a per-cid
+ * granularity. For now, do this for all Tx queues.
+ */
+ if (tx_coal) {
+ struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid == OSAL_NULL)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+ if (rc != ECORE_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
static enum _ecore_status_t
ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
@@ -3049,6 +3597,13 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
*/
@@ -3115,9 +3670,10 @@ ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
return rc;
}
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
{
- u16 i, found = 0;
+ bool found = false;
+ u16 i;
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -3127,7 +3683,7 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
if (!p_hwfn->p_dev->p_iov_info) {
DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
- return 0;
+ return false;
}
/* Mark VFs */
@@ -3156,7 +3712,7 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
* VF flr until ACKs, we're safe.
*/
p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
- found = 1;
+ found = true;
}
}
@@ -3215,7 +3771,8 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
p_vf, mbx->first_tlv.tl.type);
/* check if tlv type is known */
- if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
/* switch on the opcode */
switch (mbx->first_tlv.tl.type) {
case CHANNEL_TLV_ACQUIRE:
@@ -3257,7 +3814,34 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
case CHANNEL_TLV_RELEASE:
ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
}
+ } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ /* If we've received a message from a VF we consider malicious
+	 * we ignore the message unless it's one for RELEASE, in which
+	 * case we'll let it have the benefit of the doubt, allowing the
+ * next loaded driver to start again.
+ */
+ if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
+ /* TODO - initiate FLR, remove malicious indication */
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
+ p_vf->abs_vf_id);
+ } else {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+ }
+
+ ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
} else {
/* unknown TLV - this may belong to a VF driver from the future
* - a version written after this PF driver was written, which
@@ -3322,21 +3906,31 @@ void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
}
-static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
- u16 abs_vfid,
- struct regpair *vf_msg)
+static struct ecore_vf_info *
+ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
{
u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
- struct ecore_vf_info *p_vf;
- if (!ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Got a message from VF [abs 0x%08x] that cannot be"
+ "Got indication for VF [abs 0x%08x] that cannot be"
" handled by PF\n",
abs_vfid);
- return ECORE_SUCCESS;
+ return OSAL_NULL;
}
- p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+ u16 abs_vfid,
+ struct regpair *vf_msg)
+{
+ struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return ECORE_SUCCESS;
/* List the physical address of the request so that handler
* could later on copy the message from it.
@@ -3346,6 +3940,25 @@ static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
}
+static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
+ struct malicious_vf_eqe_data *p_data)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
+
+ if (!p_vf)
+ return;
+
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->errId);
+
+ p_vf->b_malicious = true;
+
+ OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
+}
+
enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
u8 opcode,
__le16 echo,
@@ -3359,6 +3972,9 @@ enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"VF-FLR is still not supported\n");
return ECORE_SUCCESS;
+ case COMMON_EVENT_MALICIOUS_VF:
+ ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+ return ECORE_SUCCESS;
default:
DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -3381,11 +3997,11 @@ u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
goto out;
for (i = rel_vf_id; i < p_iov->total_vfs; i++)
- if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true))
+ if (ecore_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false))
return i;
out:
- return MAX_NUM_VFS;
+ return E4_MAX_NUM_VFS;
}
enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -3427,6 +4043,12 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced MAC to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
feature = 1 << MAC_ADDR_FORCED;
OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
@@ -3451,6 +4073,12 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
"Can not set MAC, invalid vfid [%d]\n", vfid);
return ECORE_INVAL;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -3476,7 +4104,14 @@ ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->p_dev, true,
- "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ "Can not set untagged default, invalid vfid [%d]\n",
+ vfid);
+ return ECORE_INVAL;
+ }
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set untagged default to malicious VF [%d]\n",
+ vfid);
return ECORE_INVAL;
}
@@ -3516,18 +4151,6 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
*opaque_fid = vf_info->opaque_fid;
}
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
- u8 *p_vort_id)
-{
- struct ecore_vf_info *vf_info;
-
- vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
- if (!vf_info)
- return;
-
- *p_vort_id = vf_info->vport_id;
-}
-
void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
u16 pvid, int vfid)
{
@@ -3541,6 +4164,12 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
vfid);
return;
}
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->p_dev, false,
+ "Can't set forced vlan to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
feature = 1 << VLAN_ADDR_FORCED;
vf_info->bulletin.p_virt->pvid = pvid;
@@ -3552,6 +4181,29 @@ void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
}
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct ecore_vf_info *vf_info;
+
+ vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->p_dev, true,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_vf_info *p_vf_info;
@@ -3734,30 +4386,6 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
}
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
- int vfid, u32 rate)
-{
- struct ecore_vf_info *vf;
- u8 vport_id;
- int i;
-
- for_each_hwfn(p_dev, i) {
- struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
- if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
- DP_NOTICE(p_hwfn, true,
- "SR-IOV sanity check failed,"
- " can't set min rate\n");
- return ECORE_INVAL;
- }
- }
-
- vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
- vport_id = vf->vport_id;
-
- return ecore_configure_vport_wfq(p_dev, vport_id, rate);
-}
-
enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
struct ecore_ptt *p_ptt,
int vfid,
@@ -3856,7 +4484,20 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
return (p_vf->state == VF_ENABLED);
}
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct ecore_vf_info *p_vf;
+
+ p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf)
+ return false;
+
+ return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
+enum _ecore_status_t
+ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
{
struct ecore_wfq_data *vf_vp_wfq;
struct ecore_vf_info *vf_info;
diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h
index ed6ddc49..3c2f58bd 100644
--- a/drivers/net/qede/base/ecore_sriov.h
+++ b/drivers/net/qede/base/ecore_sriov.h
@@ -13,9 +13,10 @@
#include "ecore_vfpf_if.h"
#include "ecore_iov_api.h"
#include "ecore_hsi_common.h"
+#include "ecore_l2.h"
#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \
- (MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
+ (E4_MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS)
/* Represents a full message. Both the request filled by VF
* and the response filled by the PF. The VF needs one copy
@@ -62,12 +63,18 @@ struct ecore_iov_vf_mbx {
*/
};
-struct ecore_vf_q_info {
+struct ecore_vf_queue_cid {
+ bool b_is_tx;
+ struct ecore_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct ecore_vf_queue {
+	/* Input from upper-layer, mapping relative queue to queue-zone */
u16 fw_rx_qid;
u16 fw_tx_qid;
- u8 fw_cid;
- u8 rxq_active;
- u8 txq_active;
+
+ struct ecore_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
};
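
For orientation, a minimal standalone sketch of how the single Rx cid of a
qzone can be located with this layout (the helper name and its standalone form
are illustrative, not part of the patch; it mirrors the scan used by the
stop-rxqs path above):

    /* Return the lone Rx cid of a qzone, or OSAL_NULL if none is active.
     * Illustrative only; mirrors the loop in ecore_iov_vf_stop_rxqs().
     */
    static struct ecore_queue_cid *
    example_find_rx_cid(struct ecore_vf_queue *p_queue)
    {
            int i;

            for (i = 0; i < MAX_QUEUES_PER_QZONE; i++)
                    if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
                            return p_queue->cids[i].p_cid;

            return OSAL_NULL;
    }
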
enum vf_state {
@@ -97,6 +104,7 @@ struct ecore_vf_info {
struct ecore_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
+ bool b_malicious;
u8 to_disable;
struct ecore_bulletin bulletin;
@@ -110,6 +118,7 @@ struct ecore_vf_info {
u16 mtu;
u8 vport_id;
+ u8 rss_eng_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \
@@ -125,7 +134,7 @@ struct ecore_vf_info {
u8 num_mac_filters;
u8 num_vlan_filters;
- struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
+ struct ecore_vf_queue vf_queues[ECORE_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF];
/* TODO - Only windows is using it - should be removed */
@@ -151,10 +160,9 @@ struct ecore_vf_info {
* capability enabled.
*/
struct ecore_pf_iov {
- struct ecore_vf_info vfs_array[MAX_NUM_VFS];
+ struct ecore_vf_info vfs_array[E4_MAX_NUM_VFS];
u64 pending_events[ECORE_VF_ARRAY_LENGTH];
u64 pending_flr[ECORE_VF_ARRAY_LENGTH];
- u16 base_vport_id;
#ifndef REMOVE_DBG
/* This doesn't serve anything functionally, but it makes windows
@@ -276,8 +284,8 @@ u32 ecore_crc32(u32 crc,
*
 * @return true iff one of the PF's vfs got FLRed. false otherwise.
*/
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
- u32 *disabled_vfs);
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn,
+ u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h
index 6277bc80..c77ec260 100644
--- a/drivers/net/qede/base/ecore_status.h
+++ b/drivers/net/qede/base/ecore_status.h
@@ -10,6 +10,7 @@
#define __ECORE_STATUS_H__
enum _ecore_status_t {
+ ECORE_CONN_RESET = -13,
ECORE_UNKNOWN_ERROR = -12,
ECORE_NORESOURCES = -11,
ECORE_NODEV = -10,
diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h
index 616b44c2..034cf1eb 100644
--- a/drivers/net/qede/base/ecore_utils.h
+++ b/drivers/net/qede/base/ecore_utils.h
@@ -10,6 +10,12 @@
#define __ECORE_UTILS_H__
/* dma_addr_t manip */
+/* Suppress the "right shift count >= width of type" warning when the pointer
+ * type is only 32 bits wide; this is why the high word is taken with two
+ * 16-bit shifts, i.e. ((x) >> 16) >> 16, instead of a single >> 32.
+ */
+#define PTR_LO(x) ((u32)(((osal_uintptr_t)(x)) & 0xffffffff))
+#define PTR_HI(x) ((u32)((((osal_uintptr_t)(x)) >> 16) >> 16))
+
#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff))
#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32))
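
The split shift in PTR_HI() is worth a concrete illustration. The following
standalone sketch is not part of the patch and assumes osal_uintptr_t maps to
uintptr_t; it shows the two halves being extracted without ever shifting by 32
in a single step:

    #include <stdint.h>

    typedef uintptr_t osal_uintptr_t;   /* assumption for this sketch */

    #define EX_PTR_LO(x) ((uint32_t)(((osal_uintptr_t)(x)) & 0xffffffff))
    /* Two 16-bit shifts keep the expression warning-free even when
     * osal_uintptr_t is only 32 bits wide (where a plain >> 32 would warn).
     */
    #define EX_PTR_HI(x) ((uint32_t)((((osal_uintptr_t)(x)) >> 16) >> 16))

    static void split_ptr(void *p, uint32_t *hi, uint32_t *lo)
    {
            *lo = EX_PTR_LO(p);  /* lower 32 bits of the pointer value */
            *hi = EX_PTR_HI(p);  /* upper 32 bits; 0 on 32-bit builds */
    }
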
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index be8b1ec4..f4d331cf 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -65,13 +65,15 @@ static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
-static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
- u8 *done, u32 resp_size)
+static enum _ecore_status_t
+ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
+ u8 *done, u32 resp_size)
{
union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
struct ustorm_trigger_vf_zone trigger;
struct ustorm_vf_zone *zone_data;
- int rc = ECORE_SUCCESS, time = 100;
+ enum _ecore_status_t rc = ECORE_SUCCESS;
+ int time = 100;
zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
@@ -81,16 +83,6 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
/* need to add the END TLV to the message size */
resp_size += sizeof(struct channel_list_end_tlv);
- if (!p_hwfn->p_dev->b_hw_channel) {
- rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
- done,
- p_req,
- p_hwfn->vf_iov_info->pf2vf_reply,
- sizeof(union vfpf_tlvs), resp_size);
- /* TODO - no prints about message ? */
- return rc;
- }
-
/* Send TLVs over HW channel */
OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
trigger.vf_pf_msg_valid = 1;
@@ -134,7 +126,6 @@ static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
"VF <-- PF Timeout [Type %d]\n",
p_req->first_tlv.tl.type);
rc = ECORE_TIMEOUT;
- return rc;
} else {
DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
"PF response: %d [Type %d]\n",
@@ -294,8 +285,17 @@ static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
" override\n");
req->vfdev_info.capabilities |=
VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
}
}
+
+ /* If PF/VF are using same Major, PF must have had
+		 * its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, false,
+ "PF rejected acquisition by VF\n");
+ rc = ECORE_INVAL;
+ goto exit;
} else {
DP_ERR(p_hwfn,
"PF returned err %d to VF acquisition request\n",
@@ -386,8 +386,6 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
return ECORE_NOMEM;
}
- OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));
-
/* Allocate vf2pf msg */
p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
&p_iov->
@@ -453,20 +451,174 @@ free_p_iov:
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
-enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_qid,
- u16 sb,
- u8 sb_index,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void OSAL_IOMEM **pp_prod)
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= (1 << mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= (1 << mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct ecore_tunn_update_type *p_src,
+ enum ecore_tunn_mode mask, u8 *p_cls,
+ struct ecore_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+ enum ecore_tunn_mode val)
+{
+ if (feature_mask & (1 << val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ ECORE_MODE_VXLAN_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ ECORE_MODE_L2GENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ ECORE_MODE_IPGENEVE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ ECORE_MODE_L2GRE_TUNN);
+ __ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ ECORE_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_src)
+{
+ struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ enum _ecore_status_t rc;
+
+ p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ ECORE_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ ECORE_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = ECORE_INVAL;
+ }
+
+ ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+enum _ecore_status_t
+ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ void OSAL_IOMEM **pp_prod)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req;
- int rc;
+ u16 rx_qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
@@ -475,19 +627,20 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; /* Keep initialized, for future compatibility */
/* If PF is legacy, we'll need to calculate producers ourselves
* as well as clean them.
*/
- if (pp_prod && p_iov->b_pre_fp_hsi) {
+ if (p_iov->b_pre_fp_hsi) {
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0;
- *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
+ *pp_prod = (u8 OSAL_IOMEM *)
+ p_hwfn->regview +
MSTORM_QZONE_START(p_hwfn->p_dev) +
(hw_qid) * MSTORM_QZONE_SIZE;
@@ -512,7 +665,7 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
}
/* Learn the address of the producer from the response */
- if (pp_prod && !p_iov->b_pre_fp_hsi) {
+ if (!p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0;
*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
@@ -536,17 +689,18 @@ exit:
}
enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid, bool cqe_completion)
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
- req->rx_qid = rx_qid;
+ req->rx_qid = p_cid->rel.queue_id;
req->num_rxqs = 1;
req->cqe_completion = cqe_completion;
@@ -571,29 +725,28 @@ exit:
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req;
- int rc;
+ u16 qid = p_cid->rel.queue_id;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
- req->tx_qid = tx_queue_id;
+ req->tx_qid = qid;
/* Tx */
req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size;
- req->hw_sb = sb;
- req->sb_index = sb_index;
+ req->hw_sb = p_cid->rel.sb;
+ req->sb_index = p_cid->rel.sb_idx;
/* add list termination tlv */
ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -610,42 +763,40 @@ enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
goto exit;
}
- if (pp_doorbell) {
- /* Modern PFs provide the actual offsets, while legacy
- * provided only the queue id.
- */
- if (!p_iov->b_pre_fp_hsi) {
- *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- resp->offset;
- } else {
- u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
-
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
- DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
- }
+ resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
- DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
- "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
- tx_queue_id, *pp_doorbell, resp->offset);
+ *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
+ DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
}
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, *pp_doorbell, resp->offset);
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
return rc;
}
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
- req->tx_qid = tx_qid;
+ req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1;
/* add list termination tlv */
@@ -670,20 +821,36 @@ exit:
}
enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
+ struct ecore_queue_cid **pp_cid,
u8 num_rxqs,
- u8 comp_cqe_flg, u8 comp_event_flg)
+ u8 comp_cqe_flg,
+ u8 comp_event_flg)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
struct vfpf_update_rxq_tlv *req;
- int rc;
+ enum _ecore_status_t rc;
+
+ /* TODO - API is limited to assuming continuous regions of queues,
+	 * but VF queues might not fulfill this requirement.
+ * Need to consider whether we need new TLVs for this, or whether
+ * simply doing it iteratively is good enough.
+ */
+ if (!num_rxqs)
+ return ECORE_INVAL;
+again:
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));
- req->rx_qid = rx_queue_id;
- req->num_rxqs = num_rxqs;
+	/* Find the length of the current contiguous range of queues beginning
+ * at first queue's index.
+ */
+ req->rx_qid = (*pp_cid)->rel.queue_id;
+ for (req->num_rxqs = 1; req->num_rxqs < num_rxqs; req->num_rxqs++)
+ if (pp_cid[req->num_rxqs]->rel.queue_id !=
+ req->rx_qid + req->num_rxqs)
+ break;
if (comp_cqe_flg)
req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
@@ -704,9 +871,17 @@ enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
goto exit;
}
+ /* Make sure we're done with all the queues */
+ if (req->num_rxqs < num_rxqs) {
+ num_rxqs -= req->num_rxqs;
+ pp_cid += req->num_rxqs;
+ /* TODO - should we give a non-locked variant instead? */
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ goto again;
+ }
+
exit:
ecore_vf_pf_req_end(p_hwfn, rc);
-
return rc;
}
@@ -719,7 +894,8 @@ ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_vport_start_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc, i;
+ enum _ecore_status_t rc;
+ int i;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
@@ -761,7 +937,7 @@ enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
@@ -859,7 +1035,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
u8 update_rx, update_tx;
u32 resp_size = 0;
u16 size, tlv;
- int rc;
+ enum _ecore_status_t rc;
resp = &p_iov->pf2vf_reply->default_resp;
resp_size = sizeof(*resp);
@@ -956,6 +1132,7 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
if (p_params->rss_params) {
struct ecore_rss_params *rss_params = p_params->rss_params;
struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
size = sizeof(struct vfpf_vport_update_rss_tlv);
p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
@@ -977,8 +1154,16 @@ ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
p_rss_tlv->rss_enable = rss_params->rss_enable;
p_rss_tlv->rss_caps = rss_params->rss_caps;
p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
- OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
- sizeof(rss_params->rss_ind_table));
+
+ table_size = OSAL_MIN_T(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct ecore_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+
OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
sizeof(rss_params->rss_key));
}
@@ -1067,7 +1252,7 @@ enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
@@ -1101,7 +1286,7 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
struct pfvf_def_resp_tlv *resp;
struct vfpf_first_tlv *req;
u32 size;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
@@ -1140,7 +1325,6 @@ enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
}
OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
- p_hwfn->vf_iov_info = OSAL_NULL;
return rc;
}
@@ -1173,7 +1357,7 @@ enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_ucast_filter_tlv *req;
struct pfvf_def_resp_tlv *resp;
- int rc;
+ enum _ecore_status_t rc;
/* Sanitize */
if (p_ucast->opcode == ECORE_FILTER_MOVE) {
@@ -1214,7 +1398,7 @@ enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
- int rc;
+ enum _ecore_status_t rc;
/* clear mailbox and prep first tlv */
ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
@@ -1240,6 +1424,48 @@ exit:
return rc;
}
+enum _ecore_status_t
+ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn, u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid)
+{
+ struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_coalesce *req;
+ struct pfvf_def_resp_tlv *resp;
+ enum _ecore_status_t rc;
+
+ /* clear mailbox and prep header tlv */
+ req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(*req));
+
+ req->rx_coal = rx_coal;
+ req->tx_coal = tx_coal;
+ req->qid = p_cid->rel.queue_id;
+
+ DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+ "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+ rx_coal, tx_coal, req->qid);
+
+ /* add list termination tlv */
+ ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (rc != ECORE_SUCCESS)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ p_hwfn->p_dev->rx_coalesce_usecs = rx_coal;
+ p_hwfn->p_dev->tx_coalesce_usecs = tx_coal;
+
+exit:
+ ecore_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn,
u16 sb_id)
{
@@ -1356,6 +1582,12 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
OSAL_MEMCPY(port_mac,
@@ -1372,23 +1604,21 @@ void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}
-/* @DPDK */
-void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
- u32 *num_mac)
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs)
{
struct ecore_vf_iov *p_vf;
p_vf = p_hwfn->vf_iov_info;
- *num_mac = p_vf->acquire_resp.resc.num_mac_filters;
+ *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}
-void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
- u32 *num_sbs)
+void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
+ u32 *num_mac_filters)
{
- struct ecore_vf_iov *p_vf;
+ struct ecore_vf_iov *p_vf = p_hwfn->vf_iov_info;
- p_vf = p_hwfn->vf_iov_info;
- *num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
}
bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
@@ -1428,6 +1658,18 @@ bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
return true;
}
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port,
+ u16 *p_geneve_port)
+{
+ struct ecore_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
struct ecore_bulletin_content *bulletin;
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 6077d600..f4713884 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -38,10 +38,33 @@ struct ecore_vf_iov {
bool b_pre_fp_hsi;
};
+
+enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u16 coalesce,
+ struct ecore_queue_cid *p_cid);
+/**
+ * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the configuration.
+ *
+ * @param p_hwfn
+ * @param rx_coal - coalesce value in microseconds for Rx queue
+ * @param tx_coal - coalesce value in microseconds for Tx queue
+ * @param queue_cid
+ *
+ **/
+enum _ecore_status_t ecore_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal,
+ struct ecore_queue_cid *p_cid);
+
#ifdef CONFIG_ECORE_SRIOV
/**
* @brief hw preparation for VF
- * sends ACQUIRE message
+ * sends ACQUIRE message
*
* @param p_hwfn
*
@@ -53,10 +76,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
* @brief VF - start the RX Queue by sending a message to the PF
*
* @param p_hwfn
- * @param cid - zero based within the VF
- * @param rx_queue_id - zero based within the VF
- * @param sb - VF status block for this queue
- * @param sb_index - Index within the status block
+ * @param p_cid - Only relative fields are relevant
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
@@ -67,9 +87,7 @@ enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn);
* @return enum _ecore_status_t
*/
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
- u8 rx_queue_id,
- u16 sb,
- u8 sb_index,
+ struct ecore_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
@@ -81,46 +99,44 @@ enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
* PF.
*
* @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
+ * @param p_cid
* @param bd_chain_phys_addr - physical address of tx chain
* @param pp_doorbell - pointer to address to which to
 * write the doorbell to.
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
- u16 tx_queue_id,
- u16 sb,
- u8 sb_index,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- void OSAL_IOMEM **pp_doorbell);
+enum _ecore_status_t
+ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size,
+ void OSAL_IOMEM **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
- * @param rx_qid
+ * @param p_cid
* @param cqe_completion
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
- u16 rx_qid,
- bool cqe_completion);
+enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid,
+ bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
- * @param tx_qid
+ * @param p_cid
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
- u16 tx_qid);
+enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid *p_cid);
+
+/* TODO - fix all the !SRIOV prototypes */
#ifndef LINUX_REMOVE
/**
@@ -128,20 +144,18 @@ enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn,
* PF
*
* @param p_hwfn
- * @param rx_queue_id
+ * @param pp_cid - list of queue-cids which we want to update
* @param num_rxqs
- * @param init_sge_ring
* @param comp_cqe_flg
* @param comp_event_flg
*
* @return enum _ecore_status_t
*/
-enum _ecore_status_t ecore_vf_pf_rxqs_update(
- struct ecore_hwfn *p_hwfn,
- u16 rx_queue_id,
- u8 num_rxqs,
- u8 comp_cqe_flg,
- u8 comp_event_flg);
+enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_queue_cid **pp_cid,
+ u8 num_rxqs,
+ u8 comp_cqe_flg,
+ u8 comp_event_flg);
#endif
/**
@@ -267,5 +281,10 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
struct ecore_mcp_link_capabilities *p_link_caps,
struct ecore_bulletin_content *p_bulletin);
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+ struct ecore_tunnel_info *p_tunn);
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
#endif
#endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h
index 571fd374..be3a326b 100644
--- a/drivers/net/qede/base/ecore_vf_api.h
+++ b/drivers/net/qede/base/ecore_vf_api.h
@@ -61,6 +61,15 @@ void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn,
u8 *num_rxqs);
/**
+ * @brief Get number of Tx queues allocated for VF by ecore
+ *
+ * @param p_hwfn
+ * @param num_txqs - allocated Tx queues
+ */
+void ecore_vf_get_num_txqs(struct ecore_hwfn *p_hwfn,
+ u8 *num_txqs);
+
+/**
* @brief Get port mac address for VF
*
* @param p_hwfn
@@ -78,18 +87,18 @@ void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn,
void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
u8 *num_vlan_filters);
+void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
+ u32 *num_sbs);
+
/**
* @brief Get number of MAC filters allocated for VF by ecore
*
- * @param p_hwfn
- * @param num_mac_filters - allocated MAC filters
+ * @param p_hwfn
+ * @param num_mac_filters - allocated MAC filters
*/
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
u32 *num_mac_filters);
-void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
- u32 *num_sbs);
-
/**
* @brief Check if VF can set a MAC address
*
@@ -152,5 +161,7 @@ void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
u16 *fw_minor,
u16 *fw_rev,
u16 *fw_eng);
+void ecore_vf_bulletin_get_udp_ports(struct ecore_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port);
#endif
#endif
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 149d092b..66184421 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -416,10 +416,55 @@ struct vfpf_ucast_filter_tlv {
u16 padding[3];
};
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
+struct vfpf_update_coalesce {
+ struct vfpf_first_tlv first_tlv;
+ u16 rx_coal;
+ u16 tx_coal;
+ u16 qid;
+ u8 padding[2];
+};
+
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
@@ -431,6 +476,8 @@ union vfpf_tlvs {
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
+ struct vfpf_update_coalesce update_coalesce;
struct tlv_buffer_size tlv_buf_size;
};
@@ -439,6 +486,7 @@ union pfvf_tlvs {
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
};
/* This is a structure which is allocated in the VF, which the PF may update
@@ -506,9 +554,12 @@ struct ecore_bulletin_content {
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
+
u8 partner_adv_pause;
u8 sfp_tx_fault;
- u8 padding4[6];
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
u32 speed;
u32 partner_adv_speed;
@@ -552,6 +603,8 @@ enum {
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_COALESCE_UPDATE,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h
index 32130709..6dc969b0 100644
--- a/drivers/net/qede/base/eth_common.h
+++ b/drivers/net/qede/base/eth_common.h
@@ -34,6 +34,14 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+/* Limitation for Tunneled LSO Packets on the offset (in bytes) of the inner IP
+ * header (relevant to LSO for tunneled packet):
+ */
+/* Offset is limited to 253 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+/* Offset is limited to 251 bytes (inclusive). */
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
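
Both limits are inclusive and apply to the inner IP header offset of a tunneled LSO packet; a hedged sketch of the kind of guard a Tx path could apply before setting the LSO flag (the offset/ipv6 arguments are illustrative):

static inline bool qede_tunn_lso_inner_offset_ok(u16 inner_ip_ofs,
						 bool inner_ipv6)
{
	u16 limit = inner_ipv6 ? ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET :
				 ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET;

	return inner_ip_ofs <= limit; /* limits are inclusive */
}
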
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
@@ -141,16 +149,23 @@ struct eth_tx_1st_bd_flags {
/* Do not allow additional VLAN manipulations on this packet. */
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1
-/* IP checksum recalculation in needed */
+/* Recalculate IP checksum. For tunneled packet - relevant to inner header. */
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2
-/* TCP/UDP checksum recalculation in needed */
+/* Recalculate TCP/UDP checksum.
+ * For tunneled packet - relevant to inner header.
+ */
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3
-/* If set, need to add the VLAN in vlan field to the packet. */
+/* If set, insert VLAN tag from vlan field to the packet.
+ * For tunneled packet - relevant to outer header.
+ */
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4
-/* If set, this is an LSO packet. */
+/* If set, this is an LSO packet. Note: For Tunneled LSO packets, the offset of
+ * the inner IPV4 (and IPV6) header is limited to 253 (and 251 respectively)
+ * bytes, inclusive.
+ */
#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1
#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5
/* Recalculate Tunnel IP Checksum (if Tunnel IP Header is IPv4) */
@@ -165,8 +180,9 @@ struct eth_tx_1st_bd_flags {
* The parsing information data for the first tx bd of a given packet.
*/
struct eth_tx_data_1st_bd {
- __le16 vlan /* VLAN tag to insert to packet (if needed). */;
-/* Number of BDs in packet. Should be at least 2 in non-LSO packet and at least
+/* VLAN tag to insert to packet (if enabled by vlan_insertion flag). */
+ __le16 vlan;
+/* Number of BDs in packet. Should be at least 1 in non-LSO packet and at least
* 3 in LSO (or Tunnel with IPv6+ext) packet.
*/
u8 nbds;
@@ -209,10 +225,14 @@ struct eth_tx_data_2nd_bd {
/* For LSO / Tunnel header with IPv6+ext - Set if inner header is IPv6 */
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11
-/* For LSO / Tunnel header with IPv6+ext - Set if outer header has IPv6+ext */
+/* In tunneling mode - Set to 1 when the Inner header is IPv6 with extension.
+ * Otherwise set to 1 if the header is IPv6 with extension.
+ */
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12
-/* Set if Tunnel header has IPv6 ext. (3rd BD is required) */
+/* Set to 1 if Tunnel (outer = encapsulating) header has IPv6 ext. (Note: 3rd BD
+ * is required, hence EDPM does not support Tunnel [outer] header with Ipv6Ext)
+ */
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1
#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13
/* Set if (inner) L4 protocol is UDP. (Required when IPv6+ext (or tunnel with
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index 96efc3c8..fcf98477 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -84,9 +84,32 @@ struct eth_phy_cfg {
/* Remote Serdes Loopback (RX to TX) */
#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY (9)
- /* features */
- u32 feature_config_flags;
-#define ETH_EEE_MODE_ADV_LPI (1 << 0)
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * 16xmicroseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in 16xmicroseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_TIMER_USEC_MASK (0x000fffff)
+#define EEE_MODE_TIMER_USEC_OFFSET (0)
+#define EEE_MODE_TIMER_USEC_BALANCED_TIME (0xa00)
+#define EEE_MODE_TIMER_USEC_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_TIMER_USEC_LATENCY_TIME (0x6000)
+/* Set by the driver to request status timer will be in microseconds and not
+ * in EEE policy definition
+ */
+#define EEE_MODE_OUTPUT_TIME (1 << 28)
+/* Set by the driver to override default nvm timer */
+#define EEE_MODE_OVERRIDE_NVRAM (1 << 29)
+#define EEE_MODE_ENABLE_LPI (1 << 30) /* Set when */
+#define EEE_MODE_ADV_LPI (1 << 31) /* Set when EEE is enabled */
};
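
Per the field description above, enabling EEE with a driver-supplied idle timer means setting bits 31:30, selecting mode 2'b11 in bits 29:28 and placing the timer in bits 19:0; a sketch under those assumptions:

static u32 qede_build_eee_mode(u32 idle_timer_usec)
{
	u32 eee_mode = idle_timer_usec & EEE_MODE_TIMER_USEC_MASK;

	/* bits 29:28 = 2'b11 - timer taken from bits 19:0, output in
	 * 16x-microsecond units
	 */
	eee_mode |= EEE_MODE_OVERRIDE_NVRAM | EEE_MODE_OUTPUT_TIME;
	/* bits 31:30 = 2'b11 - required for EEE to be enabled */
	eee_mode |= EEE_MODE_ADV_LPI | EEE_MODE_ENABLE_LPI;

	return eee_mode;
}
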
struct port_mf_cfg {
@@ -271,16 +294,20 @@ struct dcbx_ets_feature {
#define DCBX_ETS_CBS_SHIFT 3
#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
-#define DCBX_ISCSI_OOO_TC_SHIFT 8
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
/* Entries in tc table are organized so that the left most is pri 0, right most is
* prio 7
*/
u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC (4)
+/* Fixed TCP OOO TC usage is deprecated and used only for driver backward
+ * compatibility
+ */
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
#define DCBX_CEE_STRICT_PRIORITY 0xf
/* Entries in tc table are organized so that the left most is pri 0, right most is
* prio 7
@@ -447,6 +474,14 @@ struct public_global {
#define MDUMP_REASON_INTERNAL_ERROR (1 << 0)
#define MDUMP_REASON_EXTERNAL_TRIGGER (1 << 1)
#define MDUMP_REASON_DUMP_AGED (1 << 2)
+ u32 ext_phy_upgrade_fw;
+#define EXT_PHY_FW_UPGRADE_STATUS_MASK (0x0000ffff)
+#define EXT_PHY_FW_UPGRADE_STATUS_SHIFT (0)
+#define EXT_PHY_FW_UPGRADE_STATUS_IN_PROGRESS (1)
+#define EXT_PHY_FW_UPGRADE_STATUS_FAILED (2)
+#define EXT_PHY_FW_UPGRADE_STATUS_SUCCESS (3)
+#define EXT_PHY_FW_UPGRADE_TYPE_MASK (0xffff0000)
+#define EXT_PHY_FW_UPGRADE_TYPE_SHIFT (16)
};
/**************************************/
@@ -553,23 +588,20 @@ struct public_port {
#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
@@ -578,25 +610,23 @@ struct public_port {
#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-
#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP 0x40000000
u32 link_status1;
u32 ext_phy_fw_version;
@@ -654,45 +684,47 @@ struct public_port {
u32 fc_npiv_nvram_tbl_addr;
u32 fc_npiv_nvram_tbl_size;
u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
-#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000FF00
+#define ETH_TRANSCEIVER_TYPE_SHIFT 0x00000008
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00000000
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF
/* 1G Passive copper cable */
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
/* 1G Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
/* 10G Passive copper cable */
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
/* 10G Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+/* Active optical cable */
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+/* Active copper cable */
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
/* 25G Passive copper cable - short */
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
/* 25G Active copper cable - short */
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
/* 25G Passive copper cable - medium */
#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
/* 25G Active copper cable - medium */
@@ -718,6 +750,39 @@ struct public_port {
u32 wol_pkt_len;
u32 wol_pkt_details;
struct dcb_dscp_map dcb_dscp_map;
+
+ /* the status of EEE auto-negotiation
+ * bits 19:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted if 30:29
+ * are 2'b11.
+ * bit 31 - When 1'b0 bits 15:0 contain
+ * NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_XXX define as value.
+ * When 1'b1 those bits contain a value in units of 16 microseconds.
+ */
+ u32 eee_status;
+#define EEE_TIMER_MASK 0x000fffff
+#define EEE_ADV_STATUS_MASK 0x00f00000
+#define EEE_1G_ADV (1 << 1)
+#define EEE_10G_ADV (1 << 2)
+#define EEE_ADV_STATUS_SHIFT 20
+#define EEE_LP_ADV_STATUS_MASK 0x0f000000
+#define EEE_LP_ADV_STATUS_SHIFT 24
+#define EEE_REQUESTED_BIT 0x10000000
+#define EEE_LPI_REQUESTED_BIT 0x20000000
+#define EEE_ACTIVE_BIT 0x40000000
+#define EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 eee_remote; /* Used for EEE in LLDP */
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_SHIFT 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_SHIFT 16
};
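
A short sketch decoding the advertised-speed nibbles and the "Tx LPI asserted" condition (bits 30:29 both set) out of eee_status, under the bit layout documented above:

static void qede_parse_eee_status(u32 eee_status, u8 *p_adv, u8 *p_lp_adv,
				  bool *p_tx_lpi)
{
	*p_adv = (eee_status & EEE_ADV_STATUS_MASK) >> EEE_ADV_STATUS_SHIFT;
	*p_lp_adv = (eee_status & EEE_LP_ADV_STATUS_MASK) >>
		    EEE_LP_ADV_STATUS_SHIFT;
	/* Tx LPI is asserted only when bits 30:29 are both set */
	*p_tx_lpi = (eee_status & EEE_ACTIVE_BIT) &&
		    (eee_status & EEE_LPI_REQUESTED_BIT);
}
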
/**************************************/
@@ -813,9 +878,11 @@ struct public_func {
#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
#define DRV_ID_PDA_COMP_VER_SHIFT 0
+#define LOAD_REQ_HSI_VERSION 2
#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
#define DRV_ID_DRV_TYPE_MASK 0x7f000000
#define DRV_ID_DRV_TYPE_SHIFT 24
@@ -958,6 +1025,7 @@ enum resource_id_enum {
RESOURCE_NUM_RSS_ENGINES_E = 14,
RESOURCE_LL2_QUEUE_E = 15,
RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
RESOURCE_MAX_NUM,
RESOURCE_NUM_INVALID = 0xFFFFFFFF
};
@@ -975,8 +1043,47 @@ struct resource_info {
#define RESOURCE_ELEMENT_STRICT (1 << 0)
};
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
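
A sketch composing the misc0 word of a LOAD_REQ for an OS driver with the default lock timeout and no forced unload (whether to set AVOID_RESET is left to the caller):

static u32 qede_build_load_req_misc0(bool avoid_reset)
{
	u32 misc0 = 0;

	misc0 |= DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT;
	misc0 |= LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT;
	misc0 |= LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT;
	if (avoid_reset)
		misc0 |= LOAD_REQ_FLAGS0_AVOID_RESET << LOAD_REQ_FLAGS0_SHIFT;

	return misc0;
}
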
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
struct mcp_mac wol_mac; /* UNLOAD_DONE */
/* This configuration should be set by the driver for the LINK_SET command. */
@@ -995,13 +1102,17 @@ union drv_union_data {
struct lan_stats_stc lan_stats;
struct fcoe_stats_stc fcoe_stats;
- struct iscsi_stats_stc icsci_stats;
+ struct iscsi_stats_stc iscsi_stats;
struct rdma_stats_stc rdma_stats;
struct ocbb_data_stc ocbb_info;
struct temperature_status_stc temp_info;
struct resource_info resource;
struct bist_nvm_image_att nvm_image_att;
struct mdump_config_stc mdump_config;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
/* ... */
};
@@ -1011,6 +1122,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_LOAD_REQ 0x10000000
#define DRV_MSG_CODE_LOAD_DONE 0x11000000
#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
#define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -1026,16 +1138,15 @@ struct public_drv_mb {
#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-
/* DRV_MB Param: driver version supp, FW_MB param: MFW version supp,
* data: struct resource_info
*/
#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
+#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
/*deprecated don't use*/
#define DRV_MSG_CODE_INITIATE_FLR_DEPRECATED 0x02000000
@@ -1087,19 +1198,6 @@ struct public_drv_mb {
* MCP_REG_CPU_STATE/MCP_REG_CPU_MODE registers.
*/
#define DRV_MSG_CODE_MCP_HALT 0x00100000
-/* Host shall provide buffer and size for MFW */
-#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
-/* Host shall provide buffer and size for MFW */
-#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
-/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
- * [16:31] - offset
- */
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
- * [16:31] - offset
- */
-#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
-
/* Set virtual mac address, params [31:6] - reserved, [5:4] - type,
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
@@ -1108,20 +1206,31 @@ struct public_drv_mb {
* [3:0] - func, drv_data[7:0] - MAC/WWNN/WWPN
*/
#define DRV_MSG_CODE_GET_VMAC 0x00120000
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
/* Get statistics from pf, params [31:4] - reserved, [3:0] - stats type */
#define DRV_MSG_CODE_GET_STATS 0x00130000
#define DRV_MSG_CODE_STATS_TYPE_LAN 1
#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000
+/* Host shall provide buffer and size for MFW */
+#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000
+/* Param: [0:1] - Port, [2:7] - read size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
+/* Param: [0:1] - Port, [2:7] - write size, [8:15] - I2C address,
+ * [16:31] - offset
+ */
+#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000
/* indicate OCBB related information */
#define DRV_MSG_CODE_OCBB_DATA 0x00180000
-
/* Set function BW, params[15:8] - min, params[7:0] - max */
#define DRV_MSG_CODE_SET_BW 0x00190000
#define BW_MAX_MASK 0x000000ff
@@ -1137,14 +1246,10 @@ struct public_drv_mb {
#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000
#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0)
#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1)
-
/* Param: [0:15] - gpio number */
#define DRV_MSG_CODE_GPIO_READ 0x001c0000
/* Param: [0:15] - gpio number, [16:31] - gpio value */
#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000
-/* Param: [0:15] - gpio number */
-#define DRV_MSG_CODE_GPIO_INFO 0x00270000
-
/* Param: [0:7] - test enum, [8:15] - image index, [16:31] - reserved */
#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_GET_TEMPERATURE 0x001f0000
@@ -1157,11 +1262,16 @@ struct public_drv_mb {
#define DRV_MSG_CODE_TIMESTAMP 0x00210000
/* This is an empty mailbox just return OK*/
#define DRV_MSG_CODE_EMPTY_MB 0x00220000
+
/* Param[0:4] - resource number (0-31), Param[5:7] - opcode,
* param[15:8] - age
*/
#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
/* request resource ownership with default aging */
#define RESOURCE_OPCODE_REQ 1
/* request resource ownership without aging */
@@ -1169,8 +1279,15 @@ struct public_drv_mb {
/* request resource ownership with specific aging timer (in seconds) */
#define RESOURCE_OPCODE_REQ_W_AGING 3
#define RESOURCE_OPCODE_RELEASE 4 /* release resource */
-#define RESOURCE_OPCODE_FORCE_RELEASE 5 /* force resource release */
-
+/* force resource release */
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
/* resource is free and granted to requester */
#define RESOURCE_OPCODE_GNT 1
/* resource is busy, param[7:0] indicates owner as follow 0-15 = PF0-15,
@@ -1184,11 +1301,11 @@ struct public_drv_mb {
/* indicate wrong owner during release */
#define RESOURCE_OPCODE_WRONG_OWNER 5
#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
/* dedicate resource 0 for dump */
-#define RESOURCE_DUMP (1 << 0)
+#define RESOURCE_DUMP 0
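
The request side of DRV_MSG_CODE_RESOURCE_CMD packs the resource number, opcode and (optionally) an aging timer into the mailbox parameter; a sketch for an ownership request with explicit aging:

static u32 qede_build_resource_cmd(u8 resc_num, u8 age_sec)
{
	u32 param = 0;

	param |= (resc_num << RESOURCE_CMD_REQ_RESC_SHIFT) &
		 RESOURCE_CMD_REQ_RESC_MASK;
	param |= (RESOURCE_OPCODE_REQ_W_AGING <<
		  RESOURCE_CMD_REQ_OPCODE_SHIFT) &
		 RESOURCE_CMD_REQ_OPCODE_MASK;
	param |= ((u32)age_sec << RESOURCE_CMD_REQ_AGE_SHIFT) &
		 RESOURCE_CMD_REQ_AGE_MASK;

	return param;
}
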
#define DRV_MSG_CODE_GET_MBA_VERSION 0x00240000 /* Get MBA version */
-
/* Send crash dump commands with param[3:0] - opcode */
#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
@@ -1202,14 +1319,26 @@ struct public_drv_mb {
#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
/* Request valid logs and config words */
#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-/* Set triggers mask. drv_mb_param should indicate (bitwise) which trigger
- * enabled
+/* Set triggers mask. drv_mb_param should indicate (bitwise) which
+ * trigger enabled
*/
#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 /* Clear all logs */
-
-
+/* Clear all logs */
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
#define DRV_MSG_CODE_MEM_ECC_EVENTS 0x00260000 /* Param: None */
+/* Param: [0:15] - gpio number */
+#define DRV_MSG_CODE_GPIO_INFO 0x00270000
+/* Value will be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_READ 0x00280000
+/* Value should be placed in union */
+#define DRV_MSG_CODE_EXT_PHY_WRITE 0x00290000
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+#define DRV_MSG_CODE_EXT_PHY_FW_UPGRADE 0x002a0000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -1360,12 +1489,16 @@ struct public_drv_mb {
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_UNSUPPORTED 0x00000000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -1418,6 +1551,10 @@ struct public_drv_mb {
#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
/* MFW reject "mcp reset" command if one of the drivers is up */
#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
+#define FW_MSG_CODE_NVM_FAILED_CALC_HASH 0x00310000
+#define FW_MSG_CODE_NVM_PUBLIC_KEY_MISSING 0x00320000
+#define FW_MSG_CODE_NVM_INVALID_PUBLIC_KEY 0x00330000
+
#define FW_MSG_CODE_PHY_OK 0x00110000
#define FW_MSG_CODE_PHY_ERROR 0x00120000
#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
@@ -1425,25 +1562,30 @@ struct public_drv_mb {
#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
#define FW_MSG_CODE_OK 0x00160000
#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000
-#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
-#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000
+#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000
#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000
#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000
#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000
#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
+#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
-#define FW_MSG_CODE_GPIO_OK 0x00160000
-#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
+#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000
+#define FW_MSG_CODE_GPIO_OK 0x00160000
+#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000
#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000
#define FW_MSG_CODE_GPIO_INVALID 0x000f0000
-#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
+#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000
#define FW_MSG_CODE_BIST_TEST_INVALID 0x000f0000
+#define FW_MSG_CODE_EXTPHY_INVALID_IMAGE_HEADER 0x00700000
+#define FW_MSG_CODE_EXTPHY_INVALID_PHY_TYPE 0x00710000
+#define FW_MSG_CODE_EXTPHY_OPERATION_FAILED 0x00720000
+#define FW_MSG_CODE_EXTPHY_NO_PHY_DETECTED 0x00730000
+#define FW_MSG_CODE_RECOVERY_MODE 0x00740000
-/* mdump related response codes */
+ /* mdump related response codes */
#define FW_MSG_CODE_MDUMP_NO_IMAGE_FOUND 0x00010000
#define FW_MSG_CODE_MDUMP_ALLOC_FAILED 0x00020000
#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
@@ -1454,7 +1596,7 @@ struct public_drv_mb {
u32 fw_mb_param;
- /* Resource Allocation params - MFW version support*/
+/* Resource Allocation params - MFW version support */
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
@@ -1523,6 +1665,7 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_FAILURE_DETECTED,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
MFW_DRV_MSG_MAX
};
diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h
index 8e9c08a7..4e588350 100644
--- a/drivers/net/qede/base/nvm_cfg.h
+++ b/drivers/net/qede/base/nvm_cfg.h
@@ -13,13 +13,21 @@
* Description: NVM config file - Generated file from nvm cfg excel.
* DO NOT MODIFY !!!
*
- * Created: 5/9/2016
+ * Created: 12/15/2016
*
****************************************************************************/
#ifndef NVM_CFG_H
#define NVM_CFG_H
+#define NVM_CFG_version 0x81805
+
+#define NVM_CFG_new_option_seq 15
+
+#define NVM_CFG_removed_option_seq 0
+
+#define NVM_CFG_updated_value_seq 1
+
struct nvm_cfg_mac_address {
u32 mac_addr_hi;
#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
@@ -64,10 +72,12 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
- #define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_MASK \
+ 0x80000000
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_DISABLED \
+ 0x0
+ #define NVM_CFG1_GLOB_RESERVED__M_WAS_CLOCK_SLOWDOWN_ENABLED 0x1
u32 engineering_change[3]; /* 0x4 */
u32 manufacturing_id; /* 0x10 */
u32 serial_number[4]; /* 0x14 */
@@ -144,6 +154,7 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ #define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xF
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_MASK 0x00000100
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_OFFSET 8
#define NVM_CFG1_GLOB_MPS10_ENFORCE_TX_FIR_CFG_DISABLED 0x0
@@ -241,6 +252,11 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1
#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2
+ /* ROL enable */
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_OFFSET 31
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RESET_ON_LAN_ENABLED 0x1
u32 f_lane_cfg1; /* 0x38 */
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
@@ -469,6 +485,15 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
+ /* Select package id method */
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_MASK 0x40000000
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_OFFSET 30
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_NVRAM 0x0
+ #define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_IO_IO_PINS 0x1
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_MASK 0x80000000
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_OFFSET 31
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_DISABLED 0x0
+ #define NVM_CFG1_GLOB_RECOVERY_MODE_ENABLED 0x1
u32 manufacture_time; /* 0x70 */
#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
@@ -476,6 +501,14 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
+ /* Max MSIX for Ethernet in default mode */
+ #define NVM_CFG1_GLOB_MAX_MSIX_MASK 0x03FC0000
+ #define NVM_CFG1_GLOB_MAX_MSIX_OFFSET 18
+ /* PF Mapping */
+ #define NVM_CFG1_GLOB_PF_MAPPING_MASK 0x0C000000
+ #define NVM_CFG1_GLOB_PF_MAPPING_OFFSET 26
+ #define NVM_CFG1_GLOB_PF_MAPPING_CONTINUOUS 0x0
+ #define NVM_CFG1_GLOB_PF_MAPPING_FIXED 0x1
u32 led_global_settings; /* 0x74 */
#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
@@ -485,6 +518,47 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
+ /* Max. continuous operating temperature */
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_MAX_CONT_OPERATING_TEMP_OFFSET 16
+ /* GPIO which triggers run-time port swap according to the map
+ * specified in option 205
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_RUNTIME_PORT_SWAP_GPIO_GPIO31 0x20
u32 generic_cont1; /* 0x78 */
#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
@@ -496,6 +570,25 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_LANE2_SWAP_OFFSET 14
#define NVM_CFG1_GLOB_LANE3_SWAP_MASK 0x00030000
#define NVM_CFG1_GLOB_LANE3_SWAP_OFFSET 16
+ /* Enable option 195 - Overriding the PCIe Preset value */
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_MASK 0x00040000
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_OFFSET 18
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_DISABLED 0x0
+ #define NVM_CFG1_GLOB_OVERRIDE_PCIE_PRESET_EQUAL_ENABLED 0x1
+ /* PCIe Preset value - applies only if option 194 is enabled */
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_MASK 0x00780000
+ #define NVM_CFG1_GLOB_PCIE_PRESET_VALUE_OFFSET 19
+ /* Port mapping to be used when the run-time GPIO for port-swap is
+ * defined and set.
+ */
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_MASK 0x01800000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT0_SWAP_MAP_OFFSET 23
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_MASK 0x06000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT1_SWAP_MAP_OFFSET 25
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_MASK 0x18000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT2_SWAP_MAP_OFFSET 27
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_MASK 0x60000000
+ #define NVM_CFG1_GLOB_RUNTIME_PORT3_SWAP_MAP_OFFSET 29
u32 mbi_version; /* 0x7C */
#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
@@ -503,6 +596,44 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+ /* If set to other than NA, 0 - Normal operation, 1 - Thermal event
+ * occurred
+ */
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_EVENT_GPIO_GPIO31 0x20
u32 mbi_date; /* 0x80 */
u32 misc_sig; /* 0x84 */
/* Define the GPIO mapping to switch i2c mux */
@@ -543,6 +674,81 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
+ /* Interrupt signal used for SMBus/I2C management interface
+ * 0 = Interrupt event occurred
+ * 1 = Normal
+ */
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_I2C_INTERRUPT_GPIO_GPIO31 0x20
+ /* Set aLOM FAN on GPIO */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_GPIO_GPIO31 0x20
u32 device_capabilities; /* 0x88 */
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
@@ -578,11 +784,263 @@ struct nvm_cfg1_glob {
#define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X50G 0x40
#define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_BB_1X100G \
0x80
- u32 reserved[41]; /* 0x9C */
+ #define NVM_CFG1_GLOB_MULTI_NETWORK_MODES_CAPABILITY_2X10G 0x100
+ /* @DPDK */
+ u32 reserved1[12]; /* 0x9C */
+ u32 oem1_number[8]; /* 0xCC */
+ u32 oem2_number[8]; /* 0xEC */
+ u32 mps25_active_txfir_pre; /* 0x10C */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_PRE_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_PRE_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_PRE_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_PRE_OFFSET 24
+ u32 mps25_active_txfir_main; /* 0x110 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_MAIN_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_MAIN_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_MAIN_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_MAIN_OFFSET 24
+ u32 mps25_active_txfir_post; /* 0x114 */
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_LANE0_ACT_TXFIR_POST_OFFSET 0
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_LANE1_ACT_TXFIR_POST_OFFSET 8
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_LANE2_ACT_TXFIR_POST_OFFSET 16
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_LANE3_ACT_TXFIR_POST_OFFSET 24
+ u32 features; /* 0x118 */
+ /* Set the Aux Fan on temperature */
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_ALOM_FAN_ON_AUX_VALUE_OFFSET 0
+ /* Set NC-SI package ID */
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_OFFSET 8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_SLOT_ID_GPIO_GPIO31 0x20
+ /* PMBUS Clock GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET 16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SCL_GPIO_GPIO31 0x20
+ /* PMBUS Data GPIO */
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_OFFSET 24
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_PMBUS_SDA_GPIO_GPIO31 0x20
+ u32 tx_rx_eq_25g_hlpc; /* 0x11C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_HLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_HLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_HLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_HLPC_OFFSET 24
+ u32 tx_rx_eq_25g_llpc; /* 0x120 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_LLPC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_LLPC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_LLPC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_LLPC_OFFSET 24
+ u32 tx_rx_eq_25g_ac; /* 0x124 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_AC_OFFSET 24
+ u32 tx_rx_eq_10g_pc; /* 0x128 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_PC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_PC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_PC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_PC_OFFSET 24
+ u32 tx_rx_eq_10g_ac; /* 0x12C */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_AC_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_AC_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_AC_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_AC_OFFSET 24
+ u32 tx_rx_eq_1g; /* 0x130 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_1G_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_1G_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_1G_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_1G_OFFSET 24
+ u32 tx_rx_eq_25g_bt; /* 0x134 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_25G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_25G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_25G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_25G_BT_OFFSET 24
+ u32 tx_rx_eq_10g_bt; /* 0x138 */
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_INDEX0_RX_TX_EQ_10G_BT_OFFSET 0
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_MASK 0x0000FF00
+ #define NVM_CFG1_GLOB_INDEX1_RX_TX_EQ_10G_BT_OFFSET 8
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_MASK 0x00FF0000
+ #define NVM_CFG1_GLOB_INDEX2_RX_TX_EQ_10G_BT_OFFSET 16
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_MASK 0xFF000000
+ #define NVM_CFG1_GLOB_INDEX3_RX_TX_EQ_10G_BT_OFFSET 24
+ u32 generic_cont4; /* 0x13C */
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_MASK 0x000000FF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_OFFSET 0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_NA 0x0
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO0 0x1
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO1 0x2
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO2 0x3
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO3 0x4
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO4 0x5
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO5 0x6
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO6 0x7
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO7 0x8
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO8 0x9
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO9 0xA
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO10 0xB
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO11 0xC
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO12 0xD
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO13 0xE
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO14 0xF
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO15 0x10
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO16 0x11
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO17 0x12
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO18 0x13
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO19 0x14
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO20 0x15
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO21 0x16
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO22 0x17
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO23 0x18
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO24 0x19
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO25 0x1A
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO26 0x1B
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO27 0x1C
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO28 0x1D
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO29 0x1E
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO30 0x1F
+ #define NVM_CFG1_GLOB_THERMAL_ALARM_GPIO_GPIO31 0x20
+ u32 reserved[58]; /* 0x140 */
};
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[1]; /* 0x0 */
};
struct nvm_cfg1_port {
@@ -621,6 +1079,44 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ /* GPIO used for a HW reset of the external PHY. If the same GPIO is
+ * shared by all ports, the same value must be set for every port.
+ */
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_MASK 0xFF000000
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_OFFSET 24
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_NA 0x0
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO0 0x1
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO1 0x2
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO2 0x3
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO3 0x4
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO4 0x5
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO5 0x6
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO6 0x7
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO7 0x8
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO8 0x9
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO9 0xA
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO10 0xB
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO11 0xC
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO12 0xD
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO13 0xE
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO14 0xF
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO15 0x10
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO16 0x11
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO17 0x12
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO18 0x13
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO19 0x14
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO20 0x15
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO21 0x16
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO22 0x17
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO23 0x18
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO24 0x19
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO25 0x1A
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO26 0x1B
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO27 0x1C
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO28 0x1D
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO29 0x1E
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO30 0x1F
+ #define NVM_CFG1_PORT_EXT_PHY_RESET_GPIO31 0x20
u32 pcie_cfg; /* 0xC */
#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
@@ -697,6 +1193,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+ #define NVM_CFG1_PORT_FEC_AN_MODE_MASK 0x00700000
+ #define NVM_CFG1_PORT_FEC_AN_MODE_OFFSET 20
+ #define NVM_CFG1_PORT_FEC_AN_MODE_NONE 0x0
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_FIRECODE 0x1
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE 0x2
+ #define NVM_CFG1_PORT_FEC_AN_MODE_10G_AND_25G_FIRECODE 0x3
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_RS 0x4
+ #define NVM_CFG1_PORT_FEC_AN_MODE_25G_FIRECODE_AND_RS 0x5
+ #define NVM_CFG1_PORT_FEC_AN_MODE_ALL 0x6
u32 phy_cfg; /* 0x1C */
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
@@ -736,9 +1242,16 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
- #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
+ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM8485X 0x1
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
+ /* EEE power saving mode */
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00FF0000
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+ #define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
u32 mba_cfg1; /* 0x28 */
#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
@@ -970,6 +1483,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_10G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_25g_cap; /* 0x58 */
#define NVM_CFG1_PORT_MNM_25G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1047,6 +1561,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_25G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_40g_cap; /* 0x64 */
#define NVM_CFG1_PORT_MNM_40G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1124,6 +1639,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_40G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_50g_cap; /* 0x70 */
#define NVM_CFG1_PORT_MNM_50G_DRV_SPEED_CAPABILITY_MASK_MASK \
0x0000FFFF
@@ -1203,6 +1719,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_50G_FEC_FORCE_MODE_AUTO 0x7
u32 mnm_100g_cap; /* 0x7C */
#define NVM_CFG1_PORT_MNM_100G_DRV_SPEED_CAP_MASK_MASK \
0x0000FFFF
@@ -1277,6 +1794,7 @@ struct nvm_cfg1_port {
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_NONE 0x0
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_FIRECODE 0x1
#define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_RS 0x2
+ #define NVM_CFG1_PORT_MNM_100G_FEC_FORCE_MODE_AUTO 0x7
u32 reserved[116]; /* 0x88 */
};
@@ -1387,12 +1905,17 @@ struct nvm_cfg1_func {
#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0
#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000
#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_MASK 0x001E0000
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_OFFSET 17
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ETHERNET 0x1
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_FCOE 0x2
+ #define NVM_CFG1_FUNC_NPAR_ENABLED_PROTOCOL_ISCSI 0x4
u32 reserved[8]; /* 0x30 */
};
struct nvm_cfg1 {
struct nvm_cfg1_glob glob; /* 0x0 */
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x228 */
struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
};
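
Each NVM option above is a packed bit-field, and the paired *_MASK/*_OFFSET macros
are meant to be used together to decode (or encode) a field within its containing
u32 word. A minimal illustrative sketch, not part of the patch (the helper name and
the cfg_word parameter are assumptions):

static u32 nvm_get_pmbus_scl_gpio(u32 cfg_word)
{
	/* NVM_CFG1_GLOB_PMBUS_SCL_GPIO_NA (0x0) means no GPIO is assigned;
	 * values 0x1..0x20 select GPIO0..GPIO31.
	 */
	return (cfg_word & NVM_CFG1_GLOB_PMBUS_SCL_GPIO_MASK) >>
	       NVM_CFG1_GLOB_PMBUS_SCL_GPIO_OFFSET;
}
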
diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h
index 3c369aa5..f9920f37 100644
--- a/drivers/net/qede/base/reg_addr.h
+++ b/drivers/net/qede/base/reg_addr.h
@@ -1141,3 +1141,62 @@
#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR 0x50196cUL
#define PRS_REG_MSG_INFO 0x1f0a1cUL
#define BAR0_MAP_REG_XSDM_RAM 0x1e00000UL
+
+/* 8.18.7.0 FW */
+#define BRB_REG_INT_MASK_10 0x3401b8UL
+
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
+
+#define CDU_REG_CCFC_CTX_VALID0 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 0x580408UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 0x009150UL
+#define CNIG_REG_NW_PORT_MODE_BB 0x218200UL
+#define CNIG_REG_PMEG_IF_CMD_BB 0x21821cUL
+#define CNIG_REG_PMEG_IF_ADDR_BB 0x218224UL
+#define CNIG_REG_PMEG_IF_WRDATA_BB 0x218228UL
+#define NWM_REG_MAC0_K2_E5 0x800400UL
+#define CNIG_REG_NIG_PORT0_CONF_K2_E5 0x218200UL
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT 0
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT 1
+#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT 3
+#define ETH_MAC_REG_XIF_MODE_K2_E5 0x000080UL
+#define ETH_MAC_REG_XIF_MODE_XGMII_K2_E5_SHIFT 0
+#define ETH_MAC_REG_FRM_LENGTH_K2_E5 0x000014UL
+#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_IPG_LENGTH_K2_E5 0x000044UL
+#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_K2_E5_SHIFT 0
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_K2_E5 0x00001cUL
+#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_K2_E5 0x000020UL
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_K2_E5_SHIFT 16
+#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_K2_E5_SHIFT 0
+#define ETH_MAC_REG_COMMAND_CONFIG_K2_E5 0x000008UL
+#define MISC_REG_XMAC_CORE_PORT_MODE_BB 0x008c08UL
+#define MISC_REG_XMAC_PHY_PORT_MODE_BB 0x008c04UL
+#define XMAC_REG_MODE_BB 0x210008UL
+#define XMAC_REG_RX_MAX_SIZE_BB 0x210040UL
+#define XMAC_REG_TX_CTRL_LO_BB 0x210020UL
+#define XMAC_REG_CTRL_BB 0x210000UL
+#define XMAC_REG_CTRL_TX_EN_BB (0x1 << 0)
+#define XMAC_REG_CTRL_RX_EN_BB (0x1 << 1)
+#define XMAC_REG_RX_CTRL_BB 0x210030UL
+#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE_BB (0x1 << 12)
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2_E5 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2_E5 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2_E5 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2_E5 0x2aafa4UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB 0x2aa410UL
+#define MISCS_REG_FUNCTION_HIDE_BB_K2 0x0096f0UL
+#define PCIE_REG_PRTY_MASK_K2_E5 0x0547b4UL
+#define PGLUE_B_REG_VF_BAR0_SIZE_K2_E5 0x2aaeb4UL
+
+#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
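
The *_SHIFT macros above complement the register addresses: a caller composes the
register value by shifting each sub-field into place before writing it. A hedged
sketch, assuming the usual ecore register-write helper and caller-side locals
(p_hwfn, p_ptt and rate are placeholders, not taken from this patch):

/* Enable NIG port 0, map it to NWM port 0 and program its rate field. */
u32 val = (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_K2_E5_SHIFT) |
	  (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_K2_E5_SHIFT) |
	  (rate << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_K2_E5_SHIFT);
ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2_E5, val);
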
diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c
index 30fded0f..a3c0b137 100644
--- a/drivers/net/qede/qede_eth_if.c
+++ b/drivers/net/qede/qede_eth_if.c
@@ -18,8 +18,8 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
u8 tx_switching = 0;
struct ecore_sp_vport_start_params start = { 0 };
- start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
- ECORE_TPA_MODE_NONE;
+ start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
+ ECORE_TPA_MODE_NONE;
start.remove_inner_vlan = p_params->remove_inner_vlan;
start.tx_switching = tx_switching;
start.only_untagged = false; /* untagged only */
@@ -29,7 +29,6 @@ qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
start.handle_ptp_pkts = p_params->handle_ptp_pkts;
start.vport_id = p_params->vport_id;
- start.max_buffers_per_cqe = 16; /* TODO-is this right */
start.mtu = p_params->mtu;
/* @DPDK - Disable FW placement */
start.zero_placement_offset = 1;
@@ -93,58 +92,7 @@ qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
sp_params.mtu = params->mtu;
-
- /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
- * We need to re-fix the rss values per engine for CMT.
- */
-
- if (edev->num_hwfns > 1 && params->update_rss_flg) {
- struct qed_update_vport_rss_params *rss = &params->rss_params;
- int k, max = 0;
-
- /* Find largest entry, since it's possible RSS needs to
- * be disabled [in case only 1 queue per-hwfn]
- */
- for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
- max = (max > rss->rss_ind_table[k]) ?
- max : rss->rss_ind_table[k];
-
- /* Either fix RSS values or disable RSS */
- if (edev->num_hwfns < max + 1) {
- int divisor = (max + edev->num_hwfns - 1) /
- edev->num_hwfns;
-
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "CMT - fixing RSS values (modulo %02x)\n",
- divisor);
-
- for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
- rss->rss_ind_table[k] =
- rss->rss_ind_table[k] % divisor;
- } else {
- DP_VERBOSE(edev, ECORE_MSG_SPQ,
- "CMT - 1 queue per-hwfn; Disabling RSS\n");
- params->update_rss_flg = 0;
- }
- }
-
- /* Now, update the RSS configuration for actual configuration */
- if (params->update_rss_flg) {
- sp_rss_params.update_rss_config = 1;
- sp_rss_params.rss_enable = 1;
- sp_rss_params.update_rss_capabilities = 1;
- sp_rss_params.update_rss_ind_table = 1;
- sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
- ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
- sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
- rte_memcpy(sp_rss_params.rss_ind_table,
- params->rss_params.rss_ind_table,
- ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t));
- rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
- ECORE_RSS_KEY_SIZE * sizeof(uint32_t));
- sp_params.rss_params = &sp_rss_params;
- }
+ sp_params.sge_tpa_params = params->sge_tpa_params;
for_each_hwfn(edev, i) {
struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
@@ -173,7 +121,8 @@ qed_start_rxq(struct ecore_dev *edev,
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
@@ -184,12 +133,14 @@ qed_start_rxq(struct ecore_dev *edev,
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->stats_id = p_params->vport_id;
- rc = ecore_sp_eth_rx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- bd_max_bytes,
- bd_chain_phys_addr,
- cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ rc = ecore_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size,
+ ret_params);
if (rc) {
DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
@@ -205,19 +156,17 @@ qed_start_rxq(struct ecore_dev *edev,
}
static int
-qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
+qed_stop_rxq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
{
int rc, hwfn_index;
struct ecore_hwfn *p_hwfn;
- hwfn_index = params->rss_id % edev->num_hwfns;
+ hwfn_index = rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
- rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
- params->rx_queue_id / edev->num_hwfns,
- params->eq_completion_only, false);
+ rc = ecore_eth_rx_queue_stop(p_hwfn, handle, true, false);
if (rc) {
- DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
+ DP_ERR(edev, "Failed to stop RXQ#%02x\n", rss_id);
return rc;
}
@@ -229,7 +178,8 @@ qed_start_txq(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
@@ -240,11 +190,11 @@ qed_start_txq(struct ecore_dev *edev,
p_params->queue_id = p_params->queue_id / edev->num_hwfns;
p_params->stats_id = p_params->vport_id;
- rc = ecore_sp_eth_tx_queue_start(p_hwfn,
- p_hwfn->hw_info.opaque_fid,
- p_params,
- 0 /* tc */,
- pbl_addr, pbl_size, pp_doorbell);
+ rc = ecore_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params, 0 /* tc */,
+ pbl_addr, pbl_size,
+ ret_params);
if (rc) {
DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
@@ -260,18 +210,17 @@ qed_start_txq(struct ecore_dev *edev,
}
static int
-qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
+qed_stop_txq(struct ecore_dev *edev, uint8_t rss_id, void *handle)
{
struct ecore_hwfn *p_hwfn;
int rc, hwfn_index;
- hwfn_index = params->rss_id % edev->num_hwfns;
+ hwfn_index = rss_id % edev->num_hwfns;
p_hwfn = &edev->hwfns[hwfn_index];
- rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
- params->tx_queue_id / edev->num_hwfns);
+ rc = ecore_eth_tx_queue_stop(p_hwfn, handle);
if (rc) {
- DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
+ DP_ERR(edev, "Failed to stop TXQ#%02x\n", rss_id);
return rc;
}
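
With the change above, stopping a queue no longer takes a parameter structure: the
caller passes the rss_id (used only to select the hwfn engine) together with the
opaque handle returned through the *_start_ret_params at queue-start time. A hedged
caller-side sketch (the rxq/txq locals and their handle fields are illustrative
assumptions):

/* Stop the queues through the qed_eth_ops table using the new signatures. */
rc = qed_ops->q_rx_stop(edev, rx_queue_index, rxq->handle);
if (rc)
	return rc;
rc = qed_ops->q_tx_stop(edev, tx_queue_index, txq->handle);
if (rc)
	return rc;
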
diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h
index ef4a4b55..097e0257 100644
--- a/drivers/net/qede/qede_eth_if.h
+++ b/drivers/net/qede/qede_eth_if.h
@@ -41,20 +41,10 @@ struct qed_dev_eth_info {
struct ether_addr port_mac;
uint16_t num_vlan_filters;
- uint32_t num_mac_addrs;
-};
-
-struct qed_update_vport_rss_params {
- uint16_t rss_ind_table[128];
- uint32_t rss_key[10];
- u8 rss_caps;
-};
+ uint32_t num_mac_filters;
-struct qed_stop_rxq_params {
- uint8_t rss_id;
- uint8_t rx_queue_id;
- uint8_t vport_id;
- bool eq_completion_only;
+ /* Legacy VF - this affects the datapath */
+ bool is_legacy;
};
struct qed_update_vport_params {
@@ -68,25 +58,20 @@ struct qed_update_vport_params {
uint8_t update_accept_any_vlan_flg;
uint8_t accept_any_vlan;
uint8_t update_rss_flg;
- struct qed_update_vport_rss_params rss_params;
uint16_t mtu;
+ struct ecore_sge_tpa_params *sge_tpa_params;
};
struct qed_start_vport_params {
bool remove_inner_vlan;
bool handle_ptp_pkts;
- bool gro_enable;
+ bool enable_lro;
bool drop_ttl0;
uint8_t vport_id;
uint16_t mtu;
bool clear_stats;
};
-struct qed_stop_txq_params {
- uint8_t rss_id;
- uint8_t tx_queue_id;
-};
-
struct qed_eth_ops {
const struct qed_common_ops *common;
@@ -107,19 +92,21 @@ struct qed_eth_ops {
uint16_t bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod);
+ uint16_t cqe_pbl_size,
+ struct ecore_rxq_start_ret_params *ret_params);
int (*q_rx_stop)(struct ecore_dev *edev,
- struct qed_stop_rxq_params *params);
+ uint8_t rss_id, void *handle);
int (*q_tx_start)(struct ecore_dev *edev,
uint8_t rss_num,
struct ecore_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell);
+ uint16_t pbl_size,
+ struct ecore_txq_start_ret_params *ret_params);
int (*q_tx_stop)(struct ecore_dev *edev,
- struct qed_stop_txq_params *params);
+ uint8_t rss_id, void *handle);
int (*eth_cqe_completion)(struct ecore_dev *edev,
uint8_t rss_id,
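
The rename of gro_enable to enable_lro in qed_start_vport_params ties the ethdev-level
LRO request to the vport TPA mode. A condensed, hedged sketch of how the flag travels
through the pieces changed in this patch (stitched together for illustration, not a
literal excerpt):

/* qede_dev_configure(): remember the application's LRO request */
if (rxmode->enable_lro)
	qdev->enable_lro = true;

/* qede_init_vport(): forward the flag in qed_start_vport_params */
start.enable_lro = qdev->enable_lro;

/* qed_start_vport(): map it onto the ecore TPA mode */
vport_start.tpa_mode = p_params->enable_lro ? ECORE_TPA_MODE_RSC :
		       ECORE_TPA_MODE_NONE;
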
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 6d6fb9de..7501eb20 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -12,9 +12,113 @@
/* Globals */
static const struct qed_eth_ops *qed_ops;
-static const char *drivername = "qede pmd";
static int64_t timer_period = 1;
+/* VXLAN tunnel classification mapping */
+const struct _qede_vxlan_tunn_types {
+ uint16_t rte_filter_type;
+ enum ecore_filter_ucast_type qede_type;
+ enum ecore_tunn_clss qede_tunn_clss;
+ const char *string;
+} qede_tunn_types[] = {
+ {
+ ETH_TUNNEL_FILTER_OMAC,
+ ECORE_FILTER_MAC,
+ ECORE_TUNN_CLSS_MAC_VLAN,
+ "outer-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_VNI,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_VLAN,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID,
+ ECORE_FILTER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_MAC_VNI,
+ "outer-mac and vni"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-mac"
+ },
+ {
+ ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-mac and inner-vlan"
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IMAC,
+ ECORE_FILTER_INNER_MAC_VNI_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VNI,
+ "vni and inner-mac",
+ },
+ {
+ ETH_TUNNEL_FILTER_TENID | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "vni and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
+ ECORE_FILTER_INNER_PAIR,
+ ECORE_TUNN_CLSS_INNER_MAC_VLAN,
+ "inner-mac and inner-vlan",
+ },
+ {
+ ETH_TUNNEL_FILTER_OIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "outer-IP"
+ },
+ {
+ ETH_TUNNEL_FILTER_IIP,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "inner-IP"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_IVLAN_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_IMAC_TENID,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "IMAC_TENID"
+ },
+ {
+ RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,
+ ECORE_FILTER_UNUSED,
+ MAX_ECORE_TUNN_CLSS,
+ "OMAC_TENID_IMAC"
+ },
+};
+
struct rte_qede_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint64_t offset;
@@ -175,14 +279,14 @@ static void qede_interrupt_action(struct ecore_hwfn *p_hwfn)
}
static void
-qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param)
+qede_interrupt_handler(void *param)
{
struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param;
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
qede_interrupt_action(ECORE_LEADING_HWFN(edev));
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle))
+ if (rte_intr_enable(eth_dev->intr_handle))
DP_ERR(edev, "rte_intr_enable failed\n");
}
@@ -194,6 +298,7 @@ qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info)
qdev->ops = qed_ops;
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
static void qede_print_adapter_info(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -222,6 +327,7 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
DP_INFO(edev, " Firmware file : %s\n", fw_file);
DP_INFO(edev, "*********************************\n");
}
+#endif
static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
{
@@ -231,6 +337,17 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
/* ucast->assert_on_error = true; - For debug */
}
+static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
+ uint8_t clss, bool mode, bool mask)
+{
+ memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
+ p_tunn->vxlan.b_update_mode = mode;
+ p_tunn->vxlan.b_mode_enabled = mask;
+ p_tunn->b_update_rx_cls = true;
+ p_tunn->b_update_tx_cls = true;
+ p_tunn->vxlan.tun_cls = clss;
+}
+
static int
qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
bool add)
@@ -261,13 +378,15 @@ qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
ether_addr_copy(mac_addr, &u->mac);
u->vlan = ucast->vlan;
+ u->vni = ucast->vni;
SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
qdev->num_uc_addr++;
} else {
SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
if ((memcmp(mac_addr, &tmp->mac,
ETHER_ADDR_LEN) == 0) &&
- ucast->vlan == tmp->vlan)
+ ucast->vlan == tmp->vlan &&
+ ucast->vni == tmp->vni)
break;
}
if (tmp == NULL) {
@@ -368,7 +487,8 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
}
} else { /* Unicast */
if (add) {
- if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+ if (qdev->num_uc_addr >=
+ qdev->dev_info.num_mac_filters) {
DP_ERR(edev,
"Ucast filter table limit exceeded,"
" Please enable promisc mode\n");
@@ -388,16 +508,18 @@ qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
return rc;
}
-static void
+static int
qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
- uint32_t index, __rte_unused uint32_t pool)
+ __rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct ecore_filter_ucast ucast;
+ int re;
qede_set_ucast_cmn_params(&ucast);
ucast.type = ECORE_FILTER_MAC;
ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- (void)qede_mac_int_ops(eth_dev, &ucast, 1);
+ re = (int)qede_mac_int_ops(eth_dev, &ucast, 1);
+ return re;
}
static void
@@ -405,15 +527,13 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct ether_addr mac_addr;
struct ecore_filter_ucast ucast;
- int rc;
PMD_INIT_FUNC_TRACE(edev);
- if (index >= qdev->dev_info.num_mac_addrs) {
+ if (index >= qdev->dev_info.num_mac_filters) {
DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
- index, qdev->dev_info.num_mac_addrs);
+ index, qdev->dev_info.num_mac_filters);
return;
}
@@ -433,8 +553,6 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
{
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct ecore_filter_ucast ucast;
- int rc;
if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
mac_addr->addr_bytes)) {
@@ -444,29 +562,7 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
return;
}
- /* First remove the primary mac */
- qede_set_ucast_cmn_params(&ucast);
- ucast.opcode = ECORE_FILTER_REMOVE;
- ucast.type = ECORE_FILTER_MAC;
- ether_addr_copy(&qdev->primary_mac,
- (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0) {
- DP_ERR(edev, "Unable to remove current macaddr"
- " Reverting to previous default mac\n");
- ether_addr_copy(&qdev->primary_mac,
- &eth_dev->data->mac_addrs[0]);
- return;
- }
-
- /* Add new MAC */
- ucast.opcode = ECORE_FILTER_ADD;
- ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
- rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
- if (rc != 0)
- DP_ERR(edev, "Unable to add new default mac\n");
- else
- ether_addr_copy(mac_addr, &qdev->primary_mac);
+ qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
}
static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
@@ -510,50 +606,11 @@ static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping)
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
return rc;
}
+ qdev->vlan_strip_flg = set_stripping;
return 0;
}
-static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
-{
- struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
- struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
-
- if (mask & ETH_VLAN_STRIP_MASK) {
- if (rxmode->hw_vlan_strip)
- (void)qede_vlan_stripping(eth_dev, 1);
- else
- (void)qede_vlan_stripping(eth_dev, 0);
- }
-
- if (mask & ETH_VLAN_FILTER_MASK) {
- /* VLAN filtering kicks in when a VLAN is added */
- if (rxmode->hw_vlan_filter) {
- qede_vlan_filter_set(eth_dev, 0, 1);
- } else {
- if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
- DP_NOTICE(edev, false,
- " Please remove existing VLAN filters"
- " before disabling VLAN filtering\n");
- /* Signal app that VLAN filtering is still
- * enabled
- */
- rxmode->hw_vlan_filter = true;
- } else {
- qede_vlan_filter_set(eth_dev, 0, 0);
- }
- }
- }
-
- if (mask & ETH_VLAN_EXTEND_MASK)
- DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
- " and classification is based on outer tag only\n");
-
- DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
- mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
-}
-
static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
uint16_t vlan_id, int on)
{
@@ -567,7 +624,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
if (on) {
if (qdev->configured_vlans == dev_info->num_vlan_filters) {
- DP_INFO(edev, "Reached max VLAN filter limit"
+ DP_ERR(edev, "Reached max VLAN filter limit"
" enabling accept_any_vlan\n");
qede_config_accept_any_vlan(qdev, true);
return 0;
@@ -644,6 +701,46 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
return rc;
}
+static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
+
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ if (rxmode->hw_vlan_strip)
+ (void)qede_vlan_stripping(eth_dev, 1);
+ else
+ (void)qede_vlan_stripping(eth_dev, 0);
+ }
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ /* VLAN filtering kicks in when a VLAN is added */
+ if (rxmode->hw_vlan_filter) {
+ qede_vlan_filter_set(eth_dev, 0, 1);
+ } else {
+ if (qdev->configured_vlans > 1) { /* Excluding VLAN0 */
+ DP_ERR(edev,
+ " Please remove existing VLAN filters"
+ " before disabling VLAN filtering\n");
+ /* Signal app that VLAN filtering is still
+ * enabled
+ */
+ rxmode->hw_vlan_filter = true;
+ } else {
+ qede_vlan_filter_set(eth_dev, 0, 0);
+ }
+ }
+ }
+
+ if (mask & ETH_VLAN_EXTEND_MASK)
+ DP_INFO(edev, "No offloads are supported with VLAN Q-in-Q"
+ " and classification is based on outer tag only\n");
+
+ DP_INFO(edev, "vlan offload mask %d vlan-strip %d vlan-filter %d\n",
+ mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
+}
+
static int qede_init_vport(struct qede_dev *qdev)
{
struct ecore_dev *edev = &qdev->edev;
@@ -651,7 +748,7 @@ static int qede_init_vport(struct qede_dev *qdev)
int rc;
start.remove_inner_vlan = 1;
- start.gro_enable = 0;
+ start.enable_lro = qdev->enable_lro;
start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
start.vport_id = 0;
start.drop_ttl0 = false;
@@ -671,12 +768,62 @@ static int qede_init_vport(struct qede_dev *qdev)
return 0;
}
+static void qede_prandom_bytes(uint32_t *buff)
+{
+ uint8_t i;
+
+ srand((unsigned int)time(NULL));
+ for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
+ buff[i] = rand();
+}
+
+int qede_config_rss(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+ uint32_t def_rss_key[ECORE_RSS_KEY_SIZE];
+ struct rte_eth_rss_reta_entry64 reta_conf[2];
+ struct rte_eth_rss_conf rss_conf;
+ uint32_t i, id, pos, q;
+
+ rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ if (!rss_conf.rss_key) {
+ DP_INFO(edev, "Applying driver default key\n");
+ rss_conf.rss_key_len = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
+ qede_prandom_bytes(&def_rss_key[0]);
+ rss_conf.rss_key = (uint8_t *)&def_rss_key[0];
+ }
+
+ /* Configure RSS hash */
+ if (qede_rss_hash_update(eth_dev, &rss_conf))
+ return -EINVAL;
+
+ /* Configure default RETA */
+ memset(reta_conf, 0, sizeof(reta_conf));
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
+ reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
+
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ id = i / RTE_RETA_GROUP_SIZE;
+ pos = i % RTE_RETA_GROUP_SIZE;
+ q = i % QEDE_RSS_COUNT(qdev);
+ reta_conf[id].reta[pos] = q;
+ }
+ if (qede_rss_reta_update(eth_dev, &reta_conf[0],
+ ECORE_RSS_IND_TABLE_SIZE))
+ return -EINVAL;
+
+ return 0;
+}
+
static int qede_dev_configure(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct rte_eth_rxmode *rxmode = &eth_dev->data->dev_conf.rxmode;
- int rc, i, j;
+ int rc;
PMD_INIT_FUNC_TRACE(edev);
@@ -684,14 +831,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (edev->num_hwfns > 1) {
if (eth_dev->data->nb_rx_queues < 2 ||
eth_dev->data->nb_tx_queues < 2) {
- DP_NOTICE(edev, false,
- "100G mode needs min. 2 RX/TX queues\n");
+ DP_ERR(edev, "100G mode needs min. 2 RX/TX queues\n");
return -EINVAL;
}
if ((eth_dev->data->nb_rx_queues % 2 != 0) ||
(eth_dev->data->nb_tx_queues % 2 != 0)) {
- DP_NOTICE(edev, false,
+ DP_ERR(edev,
"100G mode needs even no. of RX/TX queues\n");
return -EINVAL;
}
@@ -701,11 +847,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rxmode->enable_scatter == 1)
eth_dev->data->scattered_rx = 1;
- if (rxmode->enable_lro == 1) {
- DP_INFO(edev, "LRO is not supported\n");
- return -EINVAL;
- }
-
if (!rxmode->hw_strip_crc)
DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n");
@@ -713,6 +854,13 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled "
"in hw\n");
+ if (rxmode->enable_lro) {
+ qdev->enable_lro = true;
+ /* Enable scatter mode for LRO */
+ if (!rxmode->enable_scatter)
+ eth_dev->data->scattered_rx = 1;
+ }
+
/* Check for the port restart case */
if (qdev->state != QEDE_DEV_INIT) {
rc = qdev->ops->vport_stop(edev, 0);
@@ -739,11 +887,24 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
if (rc != 0)
return rc;
- SLIST_INIT(&qdev->vlan_list_head);
+ if (!(rxmode->mq_mode == ETH_MQ_RX_RSS ||
+ rxmode->mq_mode == ETH_MQ_RX_NONE)) {
+ DP_ERR(edev, "Unsupported RSS mode\n");
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
- /* Add primary mac for PF */
- if (IS_PF(edev))
- qede_mac_addr_set(eth_dev, &qdev->primary_mac);
+ /* Flow director mode check */
+ rc = qede_check_fdir_support(eth_dev);
+ if (rc) {
+ qdev->ops->vport_stop(edev, 0);
+ qede_dealloc_fp_resc(eth_dev);
+ return -EINVAL;
+ }
+ SLIST_INIT(&qdev->fdir_info.fdir_list_head);
+
+ SLIST_INIT(&qdev->vlan_list_head);
/* Enable VLAN offloads by default */
qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
@@ -763,13 +924,15 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
static const struct rte_eth_desc_lim qede_rx_desc_lim = {
.nb_max = NUM_RX_BDS_MAX,
.nb_min = 128,
- .nb_align = 128 /* lowest common multiple */
+ .nb_align = 128 /* lowest common multiple */
};
static const struct rte_eth_desc_lim qede_tx_desc_lim = {
.nb_max = NUM_TX_BDS_MAX,
.nb_min = 256,
- .nb_align = 256
+ .nb_align = 256,
+ .nb_seg_max = ETH_TX_MAX_BDS_PER_LSO_PACKET,
+ .nb_mtu_seg_max = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET
};
static void
@@ -783,34 +946,44 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
PMD_INIT_FUNC_TRACE(edev);
- dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
- QEDE_ETH_OVERHEAD);
+ dev_info->pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
dev_info->rx_desc_lim = qede_rx_desc_lim;
dev_info->tx_desc_lim = qede_tx_desc_lim;
- dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev);
- dev_info->max_tx_queues = dev_info->max_rx_queues;
- dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs;
- if (IS_VF(edev))
- dev_info->max_vfs = 0;
+
+ if (IS_PF(edev))
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), QEDE_PF_NUM_CONNS / 2);
else
- dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev);
- dev_info->driver_name = qdev->drv_ver;
+ dev_info->max_rx_queues = (uint16_t)RTE_MIN(
+ QEDE_MAX_RSS_CNT(qdev), ECORE_MAX_VF_CHAINS_PER_PF);
+ dev_info->max_tx_queues = dev_info->max_rx_queues;
+
+ dev_info->max_mac_addrs = qdev->dev_info.num_mac_filters;
+ dev_info->max_vfs = 0;
dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE;
+ dev_info->hash_key_size = ECORE_RSS_KEY_SIZE * sizeof(uint32_t);
dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL;
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = QEDE_TXQ_FLAGS,
};
- dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM);
- dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
- DEV_TX_OFFLOAD_IPV4_CKSUM |
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM);
+ dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO);
+
+ dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT |
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO);
memset(&link, 0, sizeof(struct qed_link_output));
qdev->ops->common->get_link(edev, &link);
@@ -876,10 +1049,12 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
+#endif
enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC;
@@ -891,10 +1066,12 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
PMD_INIT_FUNC_TRACE(edev);
+#endif
if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
qed_configure_filter_rx_mode(eth_dev,
@@ -926,12 +1103,15 @@ static void qede_poll_sp_sb_cb(void *param)
static void qede_dev_close(struct rte_eth_dev *eth_dev)
{
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
int rc;
PMD_INIT_FUNC_TRACE(edev);
+ qede_fdir_dealloc_resc(eth_dev);
+
/* dev_stop() shall cleanup fp resources in hw but without releasing
* dma memories and sw structures so that dev_start() can be called
* by the app without reconfiguration. However, in dev_close() we
@@ -952,9 +1132,9 @@ static void qede_dev_close(struct rte_eth_dev *eth_dev)
qdev->ops->common->remove(edev);
- rte_intr_disable(&eth_dev->pci_dev->intr_handle);
+ rte_intr_disable(&pci_dev->intr_handle);
- rte_intr_callback_unregister(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
if (edev->num_hwfns > 1)
@@ -1008,8 +1188,8 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
RTE_ETHDEV_QUEUE_STAT_CNTRS);
txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
RTE_ETHDEV_QUEUE_STAT_CNTRS);
- if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
- (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+ if ((rxq_stat_cntrs != (unsigned int)QEDE_RSS_COUNT(qdev)) ||
+ (txq_stat_cntrs != (unsigned int)QEDE_TSS_COUNT(qdev)))
DP_VERBOSE(edev, ECORE_MSG_DEBUG,
"Not all the queue stats will be displayed. Set"
" RTE_ETHDEV_QUEUE_STAT_CNTRS config param"
@@ -1062,7 +1242,8 @@ qede_get_xstats_count(struct qede_dev *qdev) {
static int
qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
- struct rte_eth_xstat_name *xstats_names, unsigned limit)
+ struct rte_eth_xstat_name *xstats_names,
+ __rte_unused unsigned int limit)
{
struct qede_dev *qdev = dev->data->dev_private;
const unsigned int stat_cnt = qede_get_xstats_count(qdev);
@@ -1277,7 +1458,7 @@ qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
return NULL;
}
-void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
+static void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
{
*rss_caps = 0;
*rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0;
@@ -1286,87 +1467,182 @@ void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+ *rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
}
-static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf)
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf)
{
- struct qed_update_vport_params vport_update_params;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params rss_params;
+ struct ecore_hwfn *p_hwfn;
uint32_t *key = (uint32_t *)rss_conf->rss_key;
uint64_t hf = rss_conf->rss_hf;
- int i;
+ uint8_t len = rss_conf->rss_key_len;
+ uint8_t idx;
+ uint8_t i;
+ int rc;
memset(&vport_update_params, 0, sizeof(vport_update_params));
+ memset(&rss_params, 0, sizeof(rss_params));
+
+ DP_INFO(edev, "RSS hf = 0x%lx len = %u key = %p\n",
+ (unsigned long)hf, len, key);
if (hf != 0) {
- /* Enable RSS */
- qede_init_rss_caps(&qdev->rss_params.rss_caps, hf);
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
- if (key)
- memcpy(qdev->rss_params.rss_key, rss_conf->rss_key,
- rss_conf->rss_key_len);
- vport_update_params.update_rss_flg = 1;
- qdev->rss_enabled = 1;
- } else {
- /* Disable RSS */
- qdev->rss_enabled = 0;
+ /* Enabling RSS */
+ DP_INFO(edev, "Enabling rss\n");
+
+ /* RSS caps */
+ qede_init_rss_caps(&rss_params.rss_caps, hf);
+ rss_params.update_rss_capabilities = 1;
+
+ /* RSS hash key */
+ if (key) {
+ if (len > (ECORE_RSS_KEY_SIZE * sizeof(uint32_t))) {
+ DP_ERR(edev, "RSS key length exceeds limit\n");
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Applying user supplied hash key\n");
+ rss_params.update_rss_key = 1;
+ memcpy(&rss_params.rss_key, key, len);
+ }
+ rss_params.rss_enable = 1;
}
- /* If the mapping doesn't fit any supported, return */
- if (qdev->rss_params.rss_caps == 0 && hf != 0)
- return -EINVAL;
-
- DP_INFO(edev, "%s\n", (vport_update_params.update_rss_flg) ?
- "Enabling RSS" : "Disabling RSS");
-
+ rss_params.update_rss_config = 1;
+ /* Table size has to be set together with the capabilities; 2^7 = 128 */
+ rss_params.rss_table_size_log = 7;
vport_update_params.vport_id = 0;
+ /* pass the L2 handles instead of qids */
+ for (i = 0 ; i < ECORE_RSS_IND_TABLE_SIZE ; i++) {
+ idx = qdev->rss_ind_table[i];
+ rss_params.rss_ind_table[i] = qdev->fp_array[idx].rxq->handle;
+ }
+ vport_update_params.rss_params = &rss_params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ return rc;
+ }
+ }
+ qdev->rss_enable = rss_params.rss_enable;
+
+ /* Update local structure for hash query */
+ qdev->rss_conf.rss_hf = hf;
+ qdev->rss_conf.rss_key_len = len;
+ if (qdev->rss_enable) {
+ if (qdev->rss_conf.rss_key == NULL) {
+ qdev->rss_conf.rss_key = (uint8_t *)malloc(len);
+ if (qdev->rss_conf.rss_key == NULL) {
+ DP_ERR(edev, "No memory to store RSS key\n");
+ return -ENOMEM;
+ }
+ }
+ if (key && len) {
+ DP_INFO(edev, "Storing RSS key\n");
+ memcpy(qdev->rss_conf.rss_key, key, len);
+ }
+ } else if (!qdev->rss_enable && len == 0) {
+ if (qdev->rss_conf.rss_key) {
+ free(qdev->rss_conf.rss_key);
+ qdev->rss_conf.rss_key = NULL;
+ DP_INFO(edev, "Free RSS key\n");
+ }
+ }
- return qdev->ops->vport_update(edev, &vport_update_params);
+ return 0;
}
-int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
+static int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct qede_dev *qdev = eth_dev->data->dev_private;
- uint64_t hf;
-
- if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key))
- return -EINVAL;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
- if (rss_conf->rss_key)
- memcpy(rss_conf->rss_key, qdev->rss_params.rss_key,
- sizeof(qdev->rss_params.rss_key));
-
- hf = 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ?
- ETH_RSS_IPV4 : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
- ETH_RSS_IPV6 : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ?
- ETH_RSS_IPV6_EX : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ?
- ETH_RSS_NONFRAG_IPV4_TCP : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
- ETH_RSS_NONFRAG_IPV6_TCP : 0;
- hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ?
- ETH_RSS_IPV6_TCP_EX : 0;
-
- rss_conf->rss_hf = hf;
+ rss_conf->rss_hf = qdev->rss_conf.rss_hf;
+ rss_conf->rss_key_len = qdev->rss_conf.rss_key_len;
+ if (rss_conf->rss_key && qdev->rss_conf.rss_key)
+ memcpy(rss_conf->rss_key, qdev->rss_conf.rss_key,
+ rss_conf->rss_key_len);
return 0;
}
-static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static bool qede_update_rss_parm_cmt(struct ecore_dev *edev,
+ struct ecore_rss_params *rss)
{
- struct qed_update_vport_params vport_update_params;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
+ int i, fn;
+ bool rss_mode = 1; /* enable */
+ struct ecore_queue_cid *cid;
+ struct ecore_rss_params *t_rss;
+
+ /* In the regular case we could simply take the input handlers as-is.
+ * In CMT mode, however, the handlers have to be split according to the
+ * engine they were configured on, and we then have to check whether RSS
+ * is really required, since two queues on a CMT device do not need RSS.
+ */
+
+ /* CMT should be round-robin */
+ for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) {
+ cid = rss->rss_ind_table[i];
+
+ if (cid->p_owner == ECORE_LEADING_HWFN(edev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / edev->num_hwfns] = cid;
+ }
+
+ t_rss = &rss[1];
+ t_rss->update_rss_ind_table = 1;
+ t_rss->rss_table_size_log = 7;
+ t_rss->update_rss_config = 1;
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(edev, fn) {
+ for (i = 1; i < ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns;
+ i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+
+ if (i == ECORE_RSS_IND_TABLE_SIZE / edev->num_hwfns) {
+ DP_INFO(edev,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ rss_mode = 0;
+ goto out;
+ }
+ }
+
+out:
+ t_rss->rss_enable = rss_mode;
+
+ return rss_mode;
+}
+
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_sp_vport_update_params vport_update_params;
+ struct ecore_rss_params *params;
+ struct ecore_hwfn *p_hwfn;
uint16_t i, idx, shift;
+ uint8_t entry;
+ int rc = 0;
if (reta_size > ETH_RSS_RETA_SIZE_128) {
DP_ERR(edev, "reta_size %d is not supported by hardware\n",
@@ -1375,42 +1651,71 @@ static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
}
memset(&vport_update_params, 0, sizeof(vport_update_params));
- memcpy(&vport_update_params.rss_params, &qdev->rss_params,
- sizeof(vport_update_params.rss_params));
+ params = rte_zmalloc("qede_rss", sizeof(*params) * edev->num_hwfns,
+ RTE_CACHE_LINE_SIZE);
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
- uint8_t entry = reta_conf[idx].reta[shift];
- qdev->rss_params.rss_ind_table[i] = entry;
+ entry = reta_conf[idx].reta[shift];
+ /* Pass rxq handles to ecore */
+ params->rss_ind_table[i] =
+ qdev->fp_array[entry].rxq->handle;
+ /* Update the local copy for RETA query command */
+ qdev->rss_ind_table[i] = entry;
}
}
- vport_update_params.update_rss_flg = 1;
+ params->update_rss_ind_table = 1;
+ params->rss_table_size_log = 7;
+ params->update_rss_config = 1;
+
+ /* Fix up RETA for CMT mode device */
+ if (edev->num_hwfns > 1)
+ qdev->rss_enable = qede_update_rss_parm_cmt(edev,
+ params);
vport_update_params.vport_id = 0;
+ /* Use the current value of rss_enable */
+ params->rss_enable = qdev->rss_enable;
+ vport_update_params.rss_params = params;
+
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
+ ECORE_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_ERR(edev, "vport-update for RSS failed\n");
+ goto out;
+ }
+ }
- return qdev->ops->vport_update(edev, &vport_update_params);
+out:
+ rte_free(params);
+ return rc;
}
-int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+static int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
+ struct ecore_dev *edev = &qdev->edev;
uint16_t i, idx, shift;
+ uint8_t entry;
if (reta_size > ETH_RSS_RETA_SIZE_128) {
- struct ecore_dev *edev = &qdev->edev;
DP_ERR(edev, "reta_size %d is not supported\n",
reta_size);
+ return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
idx = i / RTE_RETA_GROUP_SIZE;
shift = i % RTE_RETA_GROUP_SIZE;
if (reta_conf[idx].mask & (1ULL << shift)) {
- uint8_t entry = qdev->rss_params.rss_ind_table[i];
+ entry = qdev->rss_ind_table[i];
reta_conf[idx].reta[shift] = entry;
}
}
@@ -1418,34 +1723,339 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
return 0;
}
-int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
- uint32_t frame_size;
- struct qede_dev *qdev = dev->data->dev_private;
+ struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
+ struct qede_fastpath *fp;
+ uint32_t frame_size;
+ uint16_t rx_buf_size;
+ uint16_t bufsz;
+ int i;
+ PMD_INIT_FUNC_TRACE(edev);
qede_dev_info_get(dev, &dev_info);
-
- /* VLAN_TAG = 4 */
- frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
- if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+ frame_size = mtu + QEDE_ETH_OVERHEAD;
+ if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+ DP_ERR(edev, "MTU %u out of range\n", mtu);
return -EINVAL;
-
+ }
if (!dev->data->scattered_rx &&
- frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+ frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+ DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+ dev->data->min_rx_buf_size);
return -EINVAL;
-
+ }
+ /* Temporarily replace the I/O burst functions with dummy ones. They
+ * cannot be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */
+ dev->rx_pkt_burst = qede_rxtx_pkts_dummy;
+ dev->tx_pkt_burst = qede_rxtx_pkts_dummy;
+ qede_dev_stop(dev);
+ rte_delay_ms(1000);
+ qdev->mtu = mtu;
+ /* Fix up RX buf size for all queues of the port */
+ for_each_queue(i) {
+ fp = &qdev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_RX) {
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(
+ fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
+ if (dev->data->scattered_rx)
+ rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
+ else
+ rx_buf_size = mtu + QEDE_ETH_OVERHEAD;
+ rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size);
+ fp->rxq->rx_buf_size = rx_buf_size;
+ DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size);
+ }
+ }
+ qede_dev_start(dev);
if (frame_size > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
else
dev->data->dev_conf.rxmode.jumbo_frame = 0;
-
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- qdev->mtu = mtu;
- qede_dev_stop(dev);
- qede_dev_start(dev);
+ /* Reassign back */
+ dev->rx_pkt_burst = qede_recv_pkts;
+ dev->tx_pkt_burst = qede_xmit_pkts;
+
+ return 0;
+}
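
A short usage sketch of the MTU path above: rte_eth_dev_set_mtu() lands in qede_set_mtu(), which parks the burst handlers on qede_rxtx_pkts_dummy, stops the port, resizes the Rx buffers and restarts it. The port number and MTU value below are assumptions:

    /* Illustrative only; assumes port 0 is configured and started. */
    int ret = rte_eth_dev_set_mtu(0, 9000);
    if (ret != 0)
            RTE_LOG(ERR, PMD, "MTU change rejected: %d\n", ret);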
+
+static int
+qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn; /* @DPDK */
+ struct ecore_hwfn *p_hwfn;
+ int rc, i;
+
+ PMD_INIT_FUNC_TRACE(edev);
+
+ memset(&tunn, 0, sizeof(tunn));
+ if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
+ tunn.vxlan_port.b_update_port = true;
+ tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
+ QEDE_VXLAN_DEF_PORT;
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unable to config UDP port %u\n",
+ tunn.vxlan_port.port);
+ return rc;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_udp_dst_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, false);
+}
+
+static int
+qede_udp_dst_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tunnel_udp)
+{
+ return qede_conf_udp_dst_port(eth_dev, tunnel_udp, true);
+}
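
A minimal sketch of how an application reaches qede_conf_udp_dst_port() via the ethdev tunnel-port API; port_id 0 and the IANA VXLAN port are assumptions, and the driver falls back to QEDE_VXLAN_DEF_PORT (8472) on delete:

    /* Illustrative only; requires <rte_ethdev.h>. */
    struct rte_eth_udp_tunnel tunnel_udp = {
            .udp_port = 4789,
            .prot_type = RTE_TUNNEL_TYPE_VXLAN,
    };
    rte_eth_dev_udp_tunnel_port_add(0, &tunnel_udp);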
+
+static void qede_get_ecore_tunn_params(uint32_t filter, uint32_t *type,
+ uint32_t *clss, char *str)
+{
+ uint16_t j;
+ *clss = MAX_ECORE_TUNN_CLSS;
+
+ for (j = 0; j < RTE_DIM(qede_tunn_types); j++) {
+ if (filter == qede_tunn_types[j].rte_filter_type) {
+ *type = qede_tunn_types[j].qede_type;
+ *clss = qede_tunn_types[j].qede_tunn_clss;
+ strcpy(str, qede_tunn_types[j].string);
+ return;
+ }
+ }
+}
+
+static int
+qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
+ const struct rte_eth_tunnel_filter_conf *conf,
+ uint32_t type)
+{
+ /* Init common ucast params first */
+ qede_set_ucast_cmn_params(ucast);
+
+ /* Copy out the required fields based on classification type */
+ ucast->type = type;
+
+ switch (type) {
+ case ECORE_FILTER_VNI:
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_VLAN:
+ ucast->vlan = conf->inner_vlan;
+ break;
+ case ECORE_FILTER_MAC:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_INNER_MAC:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ break;
+ case ECORE_FILTER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->outer_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_MAC_VNI_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vni = conf->tenant_id;
+ break;
+ case ECORE_FILTER_INNER_PAIR:
+ memcpy(ucast->mac, conf->inner_mac.addr_bytes,
+ ETHER_ADDR_LEN);
+ ucast->vlan = conf->inner_vlan;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ECORE_SUCCESS;
+}
+
+static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ const struct rte_eth_tunnel_filter_conf *conf)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct ecore_tunnel_info tunn;
+ struct ecore_hwfn *p_hwfn;
+ enum ecore_filter_ucast_type type;
+ enum ecore_tunn_clss clss;
+ struct ecore_filter_ucast ucast;
+ char str[80];
+ uint16_t filter_type;
+ int rc, i;
+
+ filter_type = conf->filter_type | qdev->vxlan_filter_type;
+ /* First determine if the given filter classification is supported */
+ qede_get_ecore_tunn_params(filter_type, &type, &clss, str);
+ if (clss == MAX_ECORE_TUNN_CLSS) {
+ DP_ERR(edev, "Wrong filter type\n");
+ return -EINVAL;
+ }
+ /* Init tunnel ucast params */
+ rc = qede_set_ucast_tunn_cmn_param(&ucast, conf, type);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Unsupported VxLAN filter type 0x%x\n",
+ conf->filter_type);
+ return rc;
+ }
+ DP_INFO(edev, "Rule: \"%s\", op %d, type 0x%x\n",
+ str, filter_op, ucast.type);
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ucast.opcode = ECORE_FILTER_ADD;
+
+ /* Skip MAC/VLAN if filter is based on VNI */
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 1);
+ if (rc == 0) {
+ /* Enable accept anyvlan */
+ qede_config_accept_any_vlan(qdev, true);
+ }
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 1);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+
+ DP_INFO(edev, "Enabling VXLAN tunneling\n");
+ qede_set_cmn_tunn_param(&tunn, clss, true, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
+ &tunn, ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev, "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ }
+ }
+ qdev->num_tunn_filters++; /* Filter added successfully */
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ucast.opcode = ECORE_FILTER_REMOVE;
+
+ if (!(filter_type & ETH_TUNNEL_FILTER_TENID)) {
+ rc = qede_mac_int_ops(eth_dev, &ucast, 0);
+ } else {
+ rc = qede_ucast_filter(eth_dev, &ucast, 0);
+ if (rc == 0)
+ rc = ecore_filter_ucast_cmd(edev, &ucast,
+ ECORE_SPQ_MODE_CB, NULL);
+ }
+ if (rc != ECORE_SUCCESS)
+ return rc;
+
+ qdev->vxlan_filter_type = filter_type;
+ qdev->num_tunn_filters--;
+
+ /* Disable VXLAN if VXLAN filters become 0 */
+ if (qdev->num_tunn_filters == 0) {
+ DP_INFO(edev, "Disabling VXLAN tunneling\n");
+
+ /* Use 0 as tunnel mode */
+ qede_set_cmn_tunn_param(&tunn, clss, false, true);
+ for_each_hwfn(edev, i) {
+ p_hwfn = &edev->hwfns[i];
+ rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+ ECORE_SPQ_MODE_CB, NULL);
+ if (rc != ECORE_SUCCESS) {
+ DP_ERR(edev,
+ "Failed to update tunn_clss %u\n",
+ tunn.vxlan.tun_cls);
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported operation %d\n", filter_op);
+ return -EINVAL;
+ }
+ DP_INFO(edev, "Current VXLAN filters %d\n", qdev->num_tunn_filters);
+
+ return 0;
+}
+
+int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_tunnel_filter_conf *filter_conf =
+ (struct rte_eth_tunnel_filter_conf *)arg;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_TUNNEL:
+ switch (filter_conf->tunnel_type) {
+ case RTE_TUNNEL_TYPE_VXLAN:
+ DP_INFO(edev,
+ "Packet steering to the specified Rx queue"
+ " is not supported with VXLAN tunneling\n");
+ return qede_vxlan_tunn_config(eth_dev, filter_op,
+ filter_conf);
+ /* Place holders for future tunneling support */
+ case RTE_TUNNEL_TYPE_GENEVE:
+ case RTE_TUNNEL_TYPE_TEREDO:
+ case RTE_TUNNEL_TYPE_NVGRE:
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ DP_ERR(edev, "Unsupported tunnel type %d\n",
+ filter_conf->tunnel_type);
+ return -EINVAL;
+ case RTE_TUNNEL_TYPE_NONE:
+ default:
+ return 0;
+ }
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ return qede_fdir_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_NTUPLE:
+ return qede_ntuple_filter_conf(eth_dev, filter_op, arg);
+ case RTE_ETH_FILTER_MACVLAN:
+ case RTE_ETH_FILTER_ETHERTYPE:
+ case RTE_ETH_FILTER_FLEXIBLE:
+ case RTE_ETH_FILTER_SYN:
+ case RTE_ETH_FILTER_HASH:
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ case RTE_ETH_FILTER_MAX:
+ default:
+ DP_ERR(edev, "Unsupported filter type %d\n",
+ filter_type);
+ return -EINVAL;
+ }
return 0;
}
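
For illustration, a hedged sketch of exercising the new filter_ctrl hook with a VNI-based VXLAN filter; the port_id and all field values are placeholders:

    /* Illustrative only: add a VXLAN filter keyed on the tenant ID (VNI). */
    struct rte_eth_tunnel_filter_conf conf = {
            .tunnel_type = RTE_TUNNEL_TYPE_VXLAN,
            .filter_type = ETH_TUNNEL_FILTER_TENID,
            .tenant_id = 100,
    };
    rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_TUNNEL,
                            RTE_ETH_FILTER_ADD, &conf);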
@@ -1485,6 +2095,9 @@ static const struct eth_dev_ops qede_eth_dev_ops = {
.reta_update = qede_rss_reta_update,
.reta_query = qede_rss_reta_query,
.mtu_set = qede_set_mtu,
+ .filter_ctrl = qede_dev_filter_ctrl,
+ .udp_tunnel_port_add = qede_udp_dst_port_add,
+ .udp_tunnel_port_del = qede_udp_dst_port_del,
};
static const struct eth_dev_ops qede_eth_vf_dev_ops = {
@@ -1522,9 +2135,10 @@ static const struct eth_dev_ops qede_eth_vf_dev_ops = {
static void qede_update_pf_params(struct ecore_dev *edev)
{
struct ecore_pf_params pf_params;
- /* 32 rx + 32 tx */
+
memset(&pf_params, 0, sizeof(struct ecore_pf_params));
- pf_params.eth_pf_params.num_cons = 64;
+ pf_params.eth_pf_params.num_cons = QEDE_PF_NUM_CONNS;
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
qed_ops->common->update_pf_params(edev, &pf_params);
}
@@ -1544,13 +2158,13 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
/* Fix up ecore debug level */
uint32_t dp_module = ~0 & ~ECORE_MSG_HW;
uint8_t dp_level = ECORE_LEVEL_VERBOSE;
- uint32_t max_mac_addrs;
int rc;
/* Extract key data structures */
adapter = eth_dev->data->dev_private;
edev = &adapter->edev;
- pci_addr = eth_dev->pci_dev->addr;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_addr = pci_dev->addr;
PMD_INIT_FUNC_TRACE(edev);
@@ -1560,6 +2174,7 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->rx_pkt_burst = qede_recv_pkts;
eth_dev->tx_pkt_burst = qede_xmit_pkts;
+ eth_dev->tx_pkt_prepare = qede_xmit_prep_pkts;
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
DP_NOTICE(edev, false,
@@ -1567,8 +2182,6 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
return 0;
}
- pci_dev = eth_dev->pci_dev;
-
rte_eth_copy_pci_info(eth_dev, pci_dev);
/* @DPDK */
@@ -1593,10 +2206,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_update_pf_params(edev);
- rte_intr_callback_register(&eth_dev->pci_dev->intr_handle,
+ rte_intr_callback_register(&pci_dev->intr_handle,
qede_interrupt_handler, (void *)eth_dev);
- if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) {
+ if (rte_intr_enable(&pci_dev->intr_handle)) {
DP_ERR(edev, "rte_intr_enable() failed\n");
return -ENODEV;
}
@@ -1646,20 +2259,20 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
qede_alloc_etherdev(adapter, &dev_info);
- adapter->ops->common->set_id(edev, edev->name, QEDE_PMD_VERSION);
+ adapter->ops->common->set_name(edev, edev->name);
if (!is_vf)
- adapter->dev_info.num_mac_addrs =
+ adapter->dev_info.num_mac_filters =
(uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev),
ECORE_MAC);
else
ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev),
- &adapter->dev_info.num_mac_addrs);
+ (uint32_t *)&adapter->dev_info.num_mac_filters);
/* Allocate memory for storing MAC addr */
eth_dev->data->mac_addrs = rte_zmalloc(edev->name,
(ETHER_ADDR_LEN *
- adapter->dev_info.num_mac_addrs),
+ adapter->dev_info.num_mac_filters),
RTE_CACHE_LINE_SIZE);
if (eth_dev->data->mac_addrs == NULL) {
@@ -1702,7 +2315,9 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf)
eth_dev->dev_ops = (is_vf) ? &qede_eth_vf_dev_ops : &qede_eth_dev_ops;
if (do_once) {
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
qede_print_adapter_info(adapter);
+#endif
do_once = false;
}
@@ -1760,64 +2375,96 @@ static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev)
return qede_dev_common_uninit(eth_dev);
}
-static struct rte_pci_id pci_id_qedevf_map[] = {
+static const struct rte_pci_id pci_id_qedevf_map[] = {
#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
- QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF)
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_VF)
+ },
+ {
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_IOV)
},
{
- QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV)
+ QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_IOV)
},
{.vendor_id = 0,}
};
-static struct rte_pci_id pci_id_qede_map[] = {
+static const struct rte_pci_id pci_id_qede_map[] = {
#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev)
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980E)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_NX2_57980S)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_40)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_25)
},
{
- QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100)
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_100)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_57980S_50)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_50G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_10G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_40G)
+ },
+ {
+ QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_QLOGIC_AH_25G)
},
{.vendor_id = 0,}
};
-static struct eth_driver rte_qedevf_pmd = {
- .pci_drv = {
- .id_table = pci_id_qedevf_map,
- .drv_flags =
- RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = qedevf_eth_dev_init,
- .eth_dev_uninit = qedevf_eth_dev_uninit,
- .dev_private_size = sizeof(struct qede_dev),
+static int qedevf_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qedevf_eth_dev_init);
+}
+
+static int qedevf_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qedevf_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qedevf_pmd = {
+ .id_table = pci_id_qedevf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qedevf_eth_dev_pci_probe,
+ .remove = qedevf_eth_dev_pci_remove,
};
-static struct eth_driver rte_qede_pmd = {
- .pci_drv = {
- .id_table = pci_id_qede_map,
- .drv_flags =
- RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = qede_eth_dev_init,
- .eth_dev_uninit = qede_eth_dev_uninit,
- .dev_private_size = sizeof(struct qede_dev),
+static int qede_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct qede_dev), qede_eth_dev_init);
+}
+
+static int qede_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, qede_eth_dev_uninit);
+}
+
+static struct rte_pci_driver rte_qede_pmd = {
+ .id_table = pci_id_qede_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = qede_eth_dev_pci_probe,
+ .remove = qede_eth_dev_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_qede, rte_qede_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede, pci_id_qede_map);
-RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd.pci_drv);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PCI(net_qede_vf, rte_qedevf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_qede_vf, pci_id_qedevf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_qede_vf, "* igb_uio | vfio");
diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h
index a35ea8bd..e4323a0d 100644
--- a/drivers/net/qede/qede_ethdev.h
+++ b/drivers/net/qede/qede_ethdev.h
@@ -14,7 +14,9 @@
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_dev.h>
+#include <rte_ip.h>
/* ecore includes */
#include "base/bcm_osal.h"
@@ -31,6 +33,11 @@
#include "base/ecore_iov_api.h"
#include "base/ecore_cxt.h"
#include "base/nvm_cfg.h"
+#include "base/ecore_iov_api.h"
+#include "base/ecore_sp_commands.h"
+#include "base/ecore_l2.h"
+#include "base/ecore_dev_api.h"
+#include "base/ecore_l2.h"
#include "qede_logs.h"
#include "qede_if.h"
@@ -43,8 +50,8 @@
/* Driver versions */
#define QEDE_PMD_VER_PREFIX "QEDE PMD"
-#define QEDE_PMD_VERSION_MAJOR 1
-#define QEDE_PMD_VERSION_MINOR 2
+#define QEDE_PMD_VERSION_MAJOR 2
+#define QEDE_PMD_VERSION_MINOR 4
#define QEDE_PMD_VERSION_REVISION 0
#define QEDE_PMD_VERSION_PATCH 1
@@ -89,27 +96,48 @@
struct ecore_dev *edev = &qdev->edev; \
}
-/************* QLogic 25G/40G/100G vendor/devices ids *************/
-#define PCI_VENDOR_ID_QLOGIC 0x1077
-
-#define CHIP_NUM_57980E 0x1634
-#define CHIP_NUM_57980S 0x1629
-#define CHIP_NUM_VF 0x1630
-#define CHIP_NUM_57980S_40 0x1634
-#define CHIP_NUM_57980S_25 0x1656
-#define CHIP_NUM_57980S_IOV 0x1664
-#define CHIP_NUM_57980S_100 0x1644
-
-#define PCI_DEVICE_ID_NX2_57980E CHIP_NUM_57980E
-#define PCI_DEVICE_ID_NX2_57980S CHIP_NUM_57980S
-#define PCI_DEVICE_ID_NX2_VF CHIP_NUM_VF
-#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
-#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
-#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
-#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+/************* QLogic 10G/25G/40G/50G/100G vendor/devices ids *************/
+#define PCI_VENDOR_ID_QLOGIC 0x1077
+
+#define CHIP_NUM_57980E 0x1634
+#define CHIP_NUM_57980S 0x1629
+#define CHIP_NUM_VF 0x1630
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_AH_50G 0x8070
+#define CHIP_NUM_AH_10G 0x8071
+#define CHIP_NUM_AH_40G 0x8072
+#define CHIP_NUM_AH_25G 0x8073
+#define CHIP_NUM_AH_IOV 0x8090
+
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980E CHIP_NUM_57980E
+#define PCI_DEVICE_ID_QLOGIC_NX2_57980S CHIP_NUM_57980S
+#define PCI_DEVICE_ID_QLOGIC_NX2_VF CHIP_NUM_VF
+#define PCI_DEVICE_ID_QLOGIC_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_QLOGIC_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_QLOGIC_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_QLOGIC_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_QLOGIC_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_QLOGIC_AH_50G CHIP_NUM_AH_50G
+#define PCI_DEVICE_ID_QLOGIC_AH_10G CHIP_NUM_AH_10G
+#define PCI_DEVICE_ID_QLOGIC_AH_40G CHIP_NUM_AH_40G
+#define PCI_DEVICE_ID_QLOGIC_AH_25G CHIP_NUM_AH_25G
+#define PCI_DEVICE_ID_QLOGIC_AH_IOV CHIP_NUM_AH_IOV
+
+
+#define QEDE_VXLAN_DEF_PORT 8472
extern char fw_file[];
+/* Number of PF connections - 32 RX + 32 TX */
+#define QEDE_PF_NUM_CONNS (64)
+
+/* Maximum number of flowdir filters */
+#define QEDE_RFS_MAX_FLTR (256)
+
/* Port/function states */
enum qede_dev_state {
QEDE_DEV_INIT, /* Init the chip and Slowpath */
@@ -131,9 +159,25 @@ struct qede_mcast_entry {
struct qede_ucast_entry {
struct ether_addr mac;
uint16_t vlan;
+ uint16_t vni;
SLIST_ENTRY(qede_ucast_entry) list;
};
+struct qede_fdir_entry {
+ uint32_t soft_id; /* unused for now */
+ uint16_t pkt_len; /* actual packet length to match */
+ uint16_t rx_queue; /* queue to be steered to */
+ const struct rte_memzone *mz; /* mz used to hold L2 frame */
+ SLIST_ENTRY(qede_fdir_entry) list;
+};
+
+struct qede_fdir_info {
+ struct ecore_arfs_config_params arfs;
+ uint16_t filter_count;
+ SLIST_HEAD(fdir_list_head, qede_fdir_entry) fdir_list_head;
+};
+
+
/*
* Structure to store private data for each port.
*/
@@ -146,10 +190,12 @@ struct qede_dev {
struct qede_fastpath *fp_array;
uint8_t num_tc;
uint16_t mtu;
- bool rss_enabled;
- struct qed_update_vport_rss_params rss_params;
- uint32_t flags;
- bool gro_disable;
+ bool rss_enable;
+ struct rte_eth_rss_conf rss_conf;
+ uint16_t rss_ind_table[ECORE_RSS_IND_TABLE_SIZE];
+ uint64_t rss_hf;
+ uint8_t rss_key_len;
+ bool enable_lro;
uint16_t num_queues;
uint8_t fp_num_tx;
uint8_t fp_num_rx;
@@ -163,25 +209,43 @@ struct qede_dev {
SLIST_HEAD(uc_list_head, qede_ucast_entry) uc_list_head;
uint16_t num_uc_addr;
bool handle_hw_err;
+ uint16_t num_tunn_filters;
+ uint16_t vxlan_filter_type;
+ struct qede_fdir_info fdir_info;
+ bool vlan_strip_flg;
char drv_ver[QEDE_PMD_DRV_VER_STR_SIZE];
};
-/* Static functions */
-static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
- uint16_t vlan_id, int on);
-
-static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_conf *rss_conf);
+/* Non-static functions */
+int qede_config_rss(struct rte_eth_dev *eth_dev);
-static int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
- struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size);
+int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_conf *rss_conf);
-/* Non-static functions */
-void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf);
+int qede_rss_reta_update(struct rte_eth_dev *eth_dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size);
int qed_fill_eth_dev_info(struct ecore_dev *edev,
struct qed_dev_eth_info *info);
int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up);
+int qede_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type type,
+ enum rte_filter_op op, void *arg);
+
+int qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op, void *arg);
+
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev);
+
+uint16_t qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params);
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev);
+
#endif /* _QEDE_ETHDEV_H_ */
diff --git a/drivers/net/qede/qede_fdir.c b/drivers/net/qede/qede_fdir.c
new file mode 100644
index 00000000..7bd5c5d6
--- /dev/null
+++ b/drivers/net/qede/qede_fdir.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2017 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */
+
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_errno.h>
+
+#include "qede_ethdev.h"
+
+#define IP_VERSION (0x40)
+#define IP_HDRLEN (0x5)
+#define QEDE_FDIR_IP_DEFAULT_VERSION_IHL (IP_VERSION | IP_HDRLEN)
+#define QEDE_FDIR_TCP_DEFAULT_DATAOFF (0x50)
+#define QEDE_FDIR_IPV4_DEF_TTL (64)
+
+/* Sum of length of header types of L2, L3, L4.
+ * L2 : ether_hdr + vlan_hdr + vxlan_hdr
+ * L3 : ipv6_hdr
+ * L4 : tcp_hdr
+ */
+#define QEDE_MAX_FDIR_PKT_LEN (86)
+
+#ifndef IPV6_ADDR_LEN
+#define IPV6_ADDR_LEN (16)
+#endif
+
+#define QEDE_VALID_FLOW(flow_type) \
+ ((flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV4_UDP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_TCP || \
+ (flow_type) == RTE_ETH_FLOW_NONFRAG_IPV6_UDP)
+
+/* Note: Flowdir support is only partial.
+ * For example, drop_queue, FDIR masks and flex_conf are not supported.
+ * Parameters like pballoc/status fields are irrelevant here.
+ */
+int qede_check_fdir_support(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_fdir_conf *fdir = &eth_dev->data->dev_conf.fdir_conf;
+
+ /* check FDIR modes */
+ switch (fdir->mode) {
+ case RTE_FDIR_MODE_NONE:
+ qdev->fdir_info.arfs.arfs_enable = false;
+ DP_INFO(edev, "flowdir is disabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT:
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ return -ENOTSUP;
+ }
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "flowdir is enabled\n");
+ break;
+ case RTE_FDIR_MODE_PERFECT_TUNNEL:
+ case RTE_FDIR_MODE_SIGNATURE:
+ case RTE_FDIR_MODE_PERFECT_MAC_VLAN:
+ DP_ERR(edev, "Unsupported flowdir mode %d\n", fdir->mode);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
+
+void qede_fdir_dealloc_resc(struct rte_eth_dev *eth_dev)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct qede_fdir_entry *tmp = NULL;
+
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (tmp) {
+ if (tmp->mz)
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp);
+ }
+ }
+}
+
+static int
+qede_config_cmn_fdir_filter(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir_filter,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
+ struct qede_fdir_entry *tmp = NULL;
+ struct qede_fdir_entry *fdir = NULL;
+ const struct rte_memzone *mz;
+ struct ecore_hwfn *p_hwfn;
+ enum _ecore_status_t rc;
+ uint16_t pkt_len;
+ void *pkt;
+
+ if (add) {
+ if (qdev->fdir_info.filter_count == QEDE_RFS_MAX_FLTR - 1) {
+ DP_ERR(edev, "Reached max flowdir filter limit\n");
+ return -EINVAL;
+ }
+ fdir = rte_malloc(NULL, sizeof(struct qede_fdir_entry),
+ RTE_CACHE_LINE_SIZE);
+ if (!fdir) {
+ DP_ERR(edev, "Failed to allocate memory for fdir filter\n");
+ return -ENOMEM;
+ }
+ }
+ /* soft_id could have been used as memzone string, but soft_id is
+ * not currently used so it has no significance.
+ */
+ snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+ (unsigned long)rte_get_timer_cycles());
+ mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
+ SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
+ if (!mz) {
+ DP_ERR(edev, "Failed to allocate memzone for fdir, err = %s\n",
+ rte_strerror(rte_errno));
+ rc = -rte_errno;
+ goto err1;
+ }
+
+ pkt = mz->addr;
+ memset(pkt, 0, QEDE_MAX_FDIR_PKT_LEN);
+ pkt_len = qede_fdir_construct_pkt(eth_dev, fdir_filter, pkt,
+ &qdev->fdir_info.arfs);
+ if (pkt_len == 0) {
+ rc = -EINVAL;
+ goto err2;
+ }
+ DP_INFO(edev, "pkt_len = %u memzone = %s\n", pkt_len, mz_name);
+ if (add) {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0) {
+ DP_ERR(edev, "flowdir filter already exists\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ } else {
+ SLIST_FOREACH(tmp, &qdev->fdir_info.fdir_list_head, list) {
+ if (memcmp(tmp->mz->addr, pkt, pkt_len) == 0)
+ break;
+ }
+ if (!tmp) {
+ DP_ERR(edev, "flowdir filter does not exist\n");
+ rc = -EEXIST;
+ goto err2;
+ }
+ }
+ p_hwfn = ECORE_LEADING_HWFN(edev);
+ if (add) {
+ if (!qdev->fdir_info.arfs.arfs_enable) {
+ /* Force update */
+ eth_dev->data->dev_conf.fdir_conf.mode =
+ RTE_FDIR_MODE_PERFECT;
+ qdev->fdir_info.arfs.arfs_enable = true;
+ DP_INFO(edev, "Force enable flowdir in perfect mode\n");
+ }
+ /* Enable ARFS searcher with updated flow_types */
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ /* configure filter with ECORE_SPQ_MODE_EBLOCK */
+ rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt, NULL,
+ (dma_addr_t)mz->phys_addr,
+ pkt_len,
+ fdir_filter->action.rx_queue,
+ 0, add);
+ if (rc == ECORE_SUCCESS) {
+ if (add) {
+ fdir->rx_queue = fdir_filter->action.rx_queue;
+ fdir->pkt_len = pkt_len;
+ fdir->mz = mz;
+ SLIST_INSERT_HEAD(&qdev->fdir_info.fdir_list_head,
+ fdir, list);
+ qdev->fdir_info.filter_count++;
+ DP_INFO(edev, "flowdir filter added, count = %d\n",
+ qdev->fdir_info.filter_count);
+ } else {
+ rte_memzone_free(tmp->mz);
+ SLIST_REMOVE(&qdev->fdir_info.fdir_list_head, tmp,
+ qede_fdir_entry, list);
+ rte_free(tmp); /* the node deleted */
+ rte_memzone_free(mz); /* temp node allocated */
+ qdev->fdir_info.filter_count--;
+ DP_INFO(edev, "Fdir filter deleted, count = %d\n",
+ qdev->fdir_info.filter_count);
+ }
+ } else {
+ DP_ERR(edev, "flowdir filter failed, rc=%d filter_count=%d\n",
+ rc, qdev->fdir_info.filter_count);
+ }
+
+ /* Disable ARFS searcher if there are no more filters */
+ if (qdev->fdir_info.filter_count == 0) {
+ memset(&qdev->fdir_info.arfs, 0,
+ sizeof(struct ecore_arfs_config_params));
+ DP_INFO(edev, "Disabling flowdir\n");
+ qdev->fdir_info.arfs.arfs_enable = false;
+ ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &qdev->fdir_info.arfs);
+ }
+ return 0;
+
+err2:
+ rte_memzone_free(mz);
+err1:
+ if (add)
+ rte_free(fdir);
+ return rc;
+}
+
+static int
+qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ bool add)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+ if (!QEDE_VALID_FLOW(fdir->input.flow_type)) {
+ DP_ERR(edev, "invalid flow_type input\n");
+ return -EINVAL;
+ }
+
+ if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+ DP_ERR(edev, "invalid queue number %u\n",
+ fdir->action.rx_queue);
+ return -EINVAL;
+ }
+
+ if (fdir->input.flow_ext.is_vf) {
+ DP_ERR(edev, "flowdir is not supported over VF\n");
+ return -EINVAL;
+ }
+
+ return qede_config_cmn_fdir_filter(eth_dev, fdir, add);
+}
+
+/* Fills the L3/L4 headers and returns the actual length of the flowdir packet */
+uint16_t
+qede_fdir_construct_pkt(struct rte_eth_dev *eth_dev,
+ struct rte_eth_fdir_filter *fdir,
+ void *buff,
+ struct ecore_arfs_config_params *params)
+
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ uint16_t *ether_type;
+ uint8_t *raw_pkt;
+ struct rte_eth_fdir_input *input;
+ static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
+ struct ipv4_hdr *ip;
+ struct ipv6_hdr *ip6;
+ struct udp_hdr *udp;
+ struct tcp_hdr *tcp;
+ uint16_t len;
+ static const uint8_t next_proto[] = {
+ [RTE_ETH_FLOW_NONFRAG_IPV4_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV4_UDP] = IPPROTO_UDP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_TCP] = IPPROTO_TCP,
+ [RTE_ETH_FLOW_NONFRAG_IPV6_UDP] = IPPROTO_UDP,
+ };
+ raw_pkt = (uint8_t *)buff;
+ input = &fdir->input;
+ DP_INFO(edev, "flow_type %d\n", input->flow_type);
+
+ len = 2 * sizeof(struct ether_addr);
+ raw_pkt += 2 * sizeof(struct ether_addr);
+ if (input->flow_ext.vlan_tci) {
+ DP_INFO(edev, "adding VLAN header\n");
+ rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
+ rte_memcpy(raw_pkt + sizeof(uint16_t),
+ &input->flow_ext.vlan_tci,
+ sizeof(uint16_t));
+ raw_pkt += sizeof(vlan_frame);
+ len += sizeof(vlan_frame);
+ }
+ ether_type = (uint16_t *)raw_pkt;
+ raw_pkt += sizeof(uint16_t);
+ len += sizeof(uint16_t);
+
+ switch (input->flow_type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ /* fill the common ip header */
+ ip = (struct ipv4_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
+ ip->total_length = sizeof(struct ipv4_hdr);
+ ip->next_proto_id = input->flow.ip4_flow.proto ?
+ input->flow.ip4_flow.proto :
+ next_proto[input->flow_type];
+ ip->time_to_live = input->flow.ip4_flow.ttl ?
+ input->flow.ip4_flow.ttl :
+ QEDE_FDIR_IPV4_DEF_TTL;
+ ip->type_of_service = input->flow.ip4_flow.tos;
+ ip->dst_addr = input->flow.ip4_flow.dst_ip;
+ ip->src_addr = input->flow.ip4_flow.src_ip;
+ len += sizeof(struct ipv4_hdr);
+ params->ipv4 = true;
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->dst_port = input->flow.udp4_flow.dst_port;
+ udp->src_port = input->flow.udp4_flow.src_port;
+ udp->dgram_len = sizeof(struct udp_hdr);
+ len += sizeof(struct udp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp4_flow.src_port;
+ tcp->dst_port = input->flow.tcp4_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ /* adjust ip total_length */
+ ip->total_length += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ ip6 = (struct ipv6_hdr *)raw_pkt;
+ *ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ ip6->proto = input->flow.ipv6_flow.proto ?
+ input->flow.ipv6_flow.proto :
+ next_proto[input->flow_type];
+ rte_memcpy(&ip6->src_addr, &input->flow.ipv6_flow.src_ip,
+ IPV6_ADDR_LEN);
+ rte_memcpy(&ip6->dst_addr, &input->flow.ipv6_flow.dst_ip,
+ IPV6_ADDR_LEN);
+ len += sizeof(struct ipv6_hdr);
+
+ raw_pkt = (uint8_t *)buff;
+ /* UDP */
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ udp = (struct udp_hdr *)(raw_pkt + len);
+ udp->src_port = input->flow.udp6_flow.src_port;
+ udp->dst_port = input->flow.udp6_flow.dst_port;
+ len += sizeof(struct udp_hdr);
+ params->udp = true;
+ } else { /* TCP */
+ tcp = (struct tcp_hdr *)(raw_pkt + len);
+ tcp->src_port = input->flow.tcp6_flow.src_port;
+ tcp->dst_port = input->flow.tcp6_flow.dst_port;
+ tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
+ len += sizeof(struct tcp_hdr);
+ params->tcp = true;
+ }
+ break;
+ default:
+ DP_ERR(edev, "Unsupported flow_type %u\n",
+ input->flow_type);
+ return 0;
+ }
+
+ return len;
+}
+
+int
+qede_fdir_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_fdir_filter *fdir;
+ int ret;
+
+ fdir = (struct rte_eth_fdir_filter *)arg;
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query flowdir support */
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 1);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = qede_fdir_filter_add(eth_dev, fdir, 0);
+ break;
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_INFO:
+ return -ENOTSUP;
+ break;
+ default:
+ DP_ERR(edev, "Unknown operation %u\n", filter_op);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int qede_ntuple_filter_conf(struct rte_eth_dev *eth_dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+ struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+ struct rte_eth_ntuple_filter *ntuple;
+ struct rte_eth_fdir_filter fdir_entry;
+ struct rte_eth_tcpv4_flow *tcpv4_flow;
+ struct rte_eth_udpv4_flow *udpv4_flow;
+ bool add = false;
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_NOP:
+ /* Typically used to query fdir support */
+ if (edev->num_hwfns > 1) {
+ DP_ERR(edev, "flowdir is not supported in 100G mode\n");
+ return -ENOTSUP;
+ }
+ return 0; /* means supported */
+ case RTE_ETH_FILTER_ADD:
+ add = true;
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ break;
+ case RTE_ETH_FILTER_INFO:
+ case RTE_ETH_FILTER_GET:
+ case RTE_ETH_FILTER_UPDATE:
+ case RTE_ETH_FILTER_FLUSH:
+ case RTE_ETH_FILTER_SET:
+ case RTE_ETH_FILTER_STATS:
+ case RTE_ETH_FILTER_OP_MAX:
+ DP_ERR(edev, "Unsupported filter_op %d\n", filter_op);
+ return -ENOTSUP;
+ }
+ ntuple = (struct rte_eth_ntuple_filter *)arg;
+ /* Internally convert ntuple to fdir entry */
+ memset(&fdir_entry, 0, sizeof(fdir_entry));
+ if (ntuple->proto == IPPROTO_TCP) {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ tcpv4_flow = &fdir_entry.input.flow.tcp4_flow;
+ tcpv4_flow->ip.src_ip = ntuple->src_ip;
+ tcpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ tcpv4_flow->ip.proto = IPPROTO_TCP;
+ tcpv4_flow->src_port = ntuple->src_port;
+ tcpv4_flow->dst_port = ntuple->dst_port;
+ } else {
+ fdir_entry.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
+ udpv4_flow = &fdir_entry.input.flow.udp4_flow;
+ udpv4_flow->ip.src_ip = ntuple->src_ip;
+ udpv4_flow->ip.dst_ip = ntuple->dst_ip;
+ udpv4_flow->ip.proto = IPPROTO_UDP;
+ udpv4_flow->src_port = ntuple->src_port;
+ udpv4_flow->dst_port = ntuple->dst_port;
+ }
+ return qede_config_cmn_fdir_filter(eth_dev, &fdir_entry, add);
+}
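
A hedged application-side sketch of the flow-director path added in this file, steering one IPv4/UDP flow to an Rx queue; addresses, ports and port_id are placeholder assumptions, and the driver force-enables perfect mode on the first add if needed (requires <rte_ethdev.h>, <rte_ip.h> and <string.h>):

    /* Illustrative only: match 192.168.0.1:4000 -> 192.168.0.2:5000 to queue 1. */
    struct rte_eth_fdir_filter f;

    memset(&f, 0, sizeof(f));
    f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
    f.input.flow.udp4_flow.ip.src_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 1));
    f.input.flow.udp4_flow.ip.dst_ip = rte_cpu_to_be_32(IPv4(192, 168, 0, 2));
    f.input.flow.udp4_flow.src_port = rte_cpu_to_be_16(4000);
    f.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(5000);
    f.action.rx_queue = 1;
    rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &f);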
diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h
index 2131fe2a..405c525e 100644
--- a/drivers/net/qede/qede_if.h
+++ b/drivers/net/qede/qede_if.h
@@ -30,11 +30,24 @@ struct qed_dev_info {
/* MFW version */
uint32_t mfw_rev;
+#define QED_MFW_VERSION_0_MASK 0x000000FF
+#define QED_MFW_VERSION_0_OFFSET 0
+#define QED_MFW_VERSION_1_MASK 0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET 8
+#define QED_MFW_VERSION_2_MASK 0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET 16
+#define QED_MFW_VERSION_3_MASK 0xFF000000
+#define QED_MFW_VERSION_3_OFFSET 24
uint32_t flash_size;
uint8_t mf_mode;
bool tx_switching;
- /* To be added... */
+ u16 mtu;
+
+ /* Out param for qede */
+ bool vxlan_enable;
+ bool gre_enable;
+ bool geneve_enable;
};
enum qed_sb_type {
@@ -88,8 +101,45 @@ struct qed_slowpath_params {
#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
+struct qed_eth_tlvs {
+ u16 feat_flags;
+ u8 mac[3][ETH_ALEN];
+ u16 lso_maxoff;
+ u16 lso_minseg;
+ bool prom_mode;
+ u16 num_txqs;
+ u16 num_rxqs;
+ u16 num_netqs;
+ u16 flex_vlan;
+ u32 tcp4_offloads;
+ u32 tcp6_offloads;
+ u16 tx_avg_qdepth;
+ u16 rx_avg_qdepth;
+ u8 txqs_empty;
+ u8 rxqs_empty;
+ u8 num_txqs_full;
+ u8 num_rxqs_full;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
struct qed_common_cb_ops {
void (*link_update)(void *dev, struct qed_link_output *link);
+ void (*get_tlv_data)(void *dev, struct qed_eth_tlvs *data);
};
struct qed_selftest_ops {
@@ -108,16 +158,17 @@ struct qed_common_ops {
struct rte_pci_device *pci_dev,
enum qed_protocol protocol,
uint32_t dp_module, uint8_t dp_level, bool is_vf);
- void (*set_id)(struct ecore_dev *edev,
- char name[], const char ver_str[]);
- enum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev,
- enum ecore_chain_use_mode
- intended_use,
- enum ecore_chain_mode mode,
- enum ecore_chain_cnt_type cnt_type,
- uint32_t num_elems,
- osal_size_t elem_size,
- struct ecore_chain *p_chain);
+ void (*set_name)(struct ecore_dev *edev, char name[]);
+ enum _ecore_status_t
+ (*chain_alloc)(struct ecore_dev *edev,
+ enum ecore_chain_use_mode
+ intended_use,
+ enum ecore_chain_mode mode,
+ enum ecore_chain_cnt_type cnt_type,
+ uint32_t num_elems,
+ osal_size_t elem_size,
+ struct ecore_chain *p_chain,
+ struct ecore_chain_ext_pbl *ext_pbl);
void (*chain_free)(struct ecore_dev *edev,
struct ecore_chain *p_chain);
@@ -147,9 +198,16 @@ struct qed_common_ops {
dma_addr_t sb_phy_addr,
uint16_t sb_id, enum qed_sb_type type);
+ int (*get_sb_info)(struct ecore_dev *edev,
+ struct ecore_sb_info *sb, u16 qid,
+ struct ecore_sb_info_dbg *sb_dbg);
+
bool (*can_link_change)(struct ecore_dev *edev);
+
void (*update_msglvl)(struct ecore_dev *edev,
uint32_t dp_module, uint8_t dp_level);
+
+ int (*send_drv_state)(struct ecore_dev *edev, bool active);
};
#endif /* _QEDE_IF_H */
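
As a worked example of the packed MFW revision layout defined above, a small hedged sketch that unpacks qed_dev_info::mfw_rev into its four dotted components (dev_info is assumed to be a struct qed_dev_info already filled by qed_fill_dev_info(); printf used for brevity):

    /* Illustrative decode using the QED_MFW_VERSION_* masks/offsets above. */
    uint32_t rev = dev_info.mfw_rev;
    printf("MFW %u.%u.%u.%u\n",
           (rev & QED_MFW_VERSION_3_MASK) >> QED_MFW_VERSION_3_OFFSET,
           (rev & QED_MFW_VERSION_2_MASK) >> QED_MFW_VERSION_2_OFFSET,
           (rev & QED_MFW_VERSION_1_MASK) >> QED_MFW_VERSION_1_OFFSET,
           (rev & QED_MFW_VERSION_0_MASK) >> QED_MFW_VERSION_0_OFFSET);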
diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h
index 45c4af09..25c14d8b 100644
--- a/drivers/net/qede/qede_logs.h
+++ b/drivers/net/qede/qede_logs.h
@@ -16,15 +16,18 @@
(p_dev)->name ? (p_dev)->name : "", \
##__VA_ARGS__)
+#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
#define DP_NOTICE(p_dev, is_assert, fmt, ...) \
rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\
"[QEDE PMD: (%s)]%s:" fmt, \
(p_dev)->name ? (p_dev)->name : "", \
__func__, \
##__VA_ARGS__)
+#else
+#define DP_NOTICE(p_dev, fmt, ...) do { } while (0)
+#endif
#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO
-
#define DP_INFO(p_dev, fmt, ...) \
rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \
"[%s:%d(%s)]" fmt, \
@@ -33,7 +36,6 @@
##__VA_ARGS__)
#else
#define DP_INFO(p_dev, fmt, ...) do { } while (0)
-
#endif
#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
@@ -77,14 +79,4 @@ do { \
#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0)
#endif
-#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-
#endif /* _QEDE_LOGS_H_ */
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index b666e1c7..712c03fd 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -12,8 +12,6 @@
#include "qede_ethdev.h"
-static uint8_t npar_tx_switching = 1;
-
/* Alarm timeout. */
#define QEDE_ALARM_TIMEOUT_US 100000
@@ -21,7 +19,7 @@ static uint8_t npar_tx_switching = 1;
char fw_file[PATH_MAX];
const char *QEDE_DEFAULT_FIRMWARE =
- "/lib/firmware/qed/qed_init_values-8.10.9.0.bin";
+ "/lib/firmware/qed/qed_init_values-8.18.9.0.bin";
static void
qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params)
@@ -50,11 +48,12 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
int rc;
ecore_init_struct(edev);
+ edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
qdev->protocol = protocol;
- if (is_vf) {
+
+ if (is_vf)
edev->b_is_vf = true;
- edev->b_hw_channel = true; /* @DPDK */
- }
+
ecore_init_dp(edev, dp_module, dp_level, NULL);
qed_init_pci(edev, pci_dev);
@@ -62,6 +61,8 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
hw_prepare_params.personality = ECORE_PCI_ETH;
hw_prepare_params.drv_resc_alloc = false;
hw_prepare_params.chk_reg_fifo = false;
+ hw_prepare_params.initiate_pf_flr = true;
+ hw_prepare_params.epoch = (u32)time(NULL);
rc = ecore_hw_prepare(edev, &hw_prepare_params);
if (rc) {
DP_ERR(edev, "hw prepare failed\n");
@@ -73,7 +74,7 @@ qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev,
static int qed_nic_setup(struct ecore_dev *edev)
{
- int rc, i;
+ int rc;
rc = ecore_resc_alloc(edev);
if (rc)
@@ -221,27 +222,33 @@ static void qed_stop_iov_task(struct ecore_dev *edev)
static int qed_slowpath_start(struct ecore_dev *edev,
struct qed_slowpath_params *params)
{
- bool allow_npar_tx_switching;
const uint8_t *data = NULL;
struct ecore_hwfn *hwfn;
struct ecore_mcp_drv_version drv_version;
struct ecore_hw_init_params hw_init_params;
- struct qede_dev *qdev = (struct qede_dev *)edev;
+ struct ecore_ptt *p_ptt;
int rc;
-#ifdef QED_ENC_SUPPORTED
- struct ecore_tunn_start_params tunn_info;
-#endif
-#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev)) {
+#ifdef CONFIG_ECORE_BINARY_FW
rc = qed_load_firmware_data(edev);
if (rc) {
- DP_NOTICE(edev, true,
- "Failed to find fw file %s\n", fw_file);
+ DP_ERR(edev, "Failed to find fw file %s\n", fw_file);
goto err;
}
- }
#endif
+ hwfn = ECORE_LEADING_HWFN(edev);
+ if (edev->num_hwfns == 1) { /* skip aRFS for 100G device */
+ p_ptt = ecore_ptt_acquire(hwfn);
+ if (p_ptt) {
+ ECORE_LEADING_HWFN(edev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_ERR(edev, "Failed to acquire PTT for flowdir\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+ }
+ }
rc = qed_nic_setup(edev);
if (rc)
@@ -257,38 +264,26 @@ static int qed_slowpath_start(struct ecore_dev *edev,
if (rc) {
DP_NOTICE(edev, true,
"Failed to allocate stream memory\n");
- goto err2;
+ goto err1;
}
}
+#endif
qed_start_iov_task(edev);
-#endif
#ifdef CONFIG_ECORE_BINARY_FW
if (IS_PF(edev))
data = (const uint8_t *)edev->firmware + sizeof(u32);
#endif
- allow_npar_tx_switching = npar_tx_switching ? true : false;
-
/* Start the slowpath */
memset(&hw_init_params, 0, sizeof(hw_init_params));
-#ifdef QED_ENC_SUPPORTED
- memset(&tunn_info, 0, sizeof(tunn_info));
- tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
- 1 << QED_MODE_L2GRE_TUNN |
- 1 << QED_MODE_IPGRE_TUNN |
- 1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN;
- tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
- tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
- hw_init_params.p_tunn = &tunn_info;
-#endif
hw_init_params.b_hw_start = true;
hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
- hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
+ hw_init_params.allow_npar_tx_switch = true;
hw_init_params.bin_fw_data = data;
- hw_init_params.epoch = (u32)time(NULL);
+ hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+ hw_init_params.avoid_eng_reset = false;
rc = ecore_hw_init(edev, &hw_init_params);
if (rc) {
DP_ERR(edev, "ecore_hw_init failed\n");
@@ -310,7 +305,7 @@ static int qed_slowpath_start(struct ecore_dev *edev,
if (rc) {
DP_NOTICE(edev, true,
"Failed sending drv version command\n");
- return rc;
+ goto err3;
}
}
@@ -318,8 +313,14 @@ static int qed_slowpath_start(struct ecore_dev *edev,
return 0;
+err3:
ecore_hw_stop(edev);
err2:
+ qed_stop_iov_task(edev);
+#ifdef CONFIG_ECORE_ZIPPED_FW
+ qed_free_stream_mem(edev);
+err1:
+#endif
ecore_resc_free(edev);
err:
#ifdef CONFIG_ECORE_BINARY_FW
@@ -338,27 +339,40 @@ static int
qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
{
struct ecore_ptt *ptt = NULL;
+ struct ecore_tunnel_info *tun = &edev->tunnel;
memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ if (tun->vxlan.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == ECORE_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
dev_info->num_hwfns = edev->num_hwfns;
dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]);
+ dev_info->mtu = ECORE_LEADING_HWFN(edev)->hw_info.mtu;
+
rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr,
ETHER_ADDR_LEN);
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+
if (IS_PF(edev)) {
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = edev->mf_mode;
dev_info->tx_switching = false;
- } else {
- ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major,
- &dev_info->fw_minor, &dev_info->fw_rev,
- &dev_info->fw_eng);
- }
- if (IS_PF(edev)) {
ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev));
if (ptt) {
ecore_mcp_get_mfw_ver(ECORE_LEADING_HWFN(edev), ptt,
@@ -386,7 +400,6 @@ qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info)
int
qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
{
- struct qede_dev *qdev = (struct qede_dev *)edev;
uint8_t queues = 0;
int i;
@@ -416,11 +429,6 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
if (edev->num_hwfns > 1) {
ecore_vf_get_num_rxqs(&edev->hwfns[1], &queues);
info->num_queues += queues;
- /* Restrict 100G VF to advertise 16 queues till the
- * required support is available to go beyond 16.
- */
- info->num_queues = RTE_MIN(info->num_queues,
- ECORE_MAX_VF_CHAINS_PER_PF);
}
ecore_vf_get_num_vlan_filters(&edev->hwfns[0],
@@ -428,6 +436,8 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
ecore_vf_get_port_mac(&edev->hwfns[0],
(uint8_t *)&info->port_mac);
+
+ info->is_legacy = ecore_vf_get_pre_fp_hsi(&edev->hwfns[0]);
}
qed_fill_dev_info(edev, &info->common);
@@ -438,9 +448,7 @@ qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info)
return 0;
}
-static void
-qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
- const char ver_str[NAME_SIZE])
+static void qed_set_name(struct ecore_dev *edev, char name[NAME_SIZE])
{
int i;
@@ -448,8 +456,6 @@ qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE],
for_each_hwfn(edev, i) {
snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
- memcpy(edev->ver_str, ver_str, NAME_SIZE);
- edev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static uint32_t
@@ -490,7 +496,6 @@ static void qed_fill_link(struct ecore_hwfn *hwfn,
struct ecore_mcp_link_params params;
struct ecore_mcp_link_state link;
struct ecore_mcp_link_capabilities link_caps;
- uint32_t media_type;
uint8_t change = 0;
memset(if_link, 0, sizeof(*if_link));
@@ -638,19 +643,6 @@ static int qed_nic_stop(struct ecore_dev *edev)
return rc;
}
-static int qed_nic_reset(struct ecore_dev *edev)
-{
- int rc;
-
- rc = ecore_hw_reset(edev);
- if (rc)
- return rc;
-
- ecore_resc_free(edev);
-
- return 0;
-}
-
static int qed_slowpath_stop(struct ecore_dev *edev)
{
#ifdef CONFIG_QED_SRIOV
@@ -669,10 +661,11 @@ static int qed_slowpath_stop(struct ecore_dev *edev)
if (IS_QED_ETH_IF(edev))
qed_sriov_disable(edev, true);
#endif
- qed_nic_stop(edev);
}
- qed_nic_reset(edev);
+ qed_nic_stop(edev);
+
+ ecore_resc_free(edev);
qed_stop_iov_task(edev);
return 0;
@@ -686,17 +679,61 @@ static void qed_remove(struct ecore_dev *edev)
ecore_hw_remove(edev);
}
+static int qed_send_drv_state(struct ecore_dev *edev, bool active)
+{
+ struct ecore_hwfn *hwfn = ECORE_LEADING_HWFN(edev);
+ struct ecore_ptt *ptt;
+ int status = 0;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = ecore_mcp_ov_update_driver_state(hwfn, ptt, active ?
+ ECORE_OV_DRIVER_STATE_ACTIVE :
+ ECORE_OV_DRIVER_STATE_DISABLED);
+
+ ecore_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+static int qed_get_sb_info(struct ecore_dev *edev, struct ecore_sb_info *sb,
+ u16 qid, struct ecore_sb_info_dbg *sb_dbg)
+{
+ struct ecore_hwfn *hwfn = &edev->hwfns[qid % edev->num_hwfns];
+ struct ecore_ptt *ptt;
+ int rc;
+
+ if (IS_VF(edev))
+ return -EINVAL;
+
+ ptt = ecore_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, true, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = ecore_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ ecore_ptt_release(hwfn, ptt);
+ return rc;
+}
+
const struct qed_common_ops qed_common_ops_pass = {
INIT_STRUCT_FIELD(probe, &qed_probe),
INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params),
INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start),
- INIT_STRUCT_FIELD(set_id, &qed_set_id),
+ INIT_STRUCT_FIELD(set_name, &qed_set_name),
INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc),
INIT_STRUCT_FIELD(chain_free, &ecore_chain_free),
INIT_STRUCT_FIELD(sb_init, &qed_sb_init),
+ INIT_STRUCT_FIELD(get_sb_info, &qed_get_sb_info),
INIT_STRUCT_FIELD(get_link, &qed_get_current_link),
INIT_STRUCT_FIELD(set_link, &qed_set_link),
INIT_STRUCT_FIELD(drain, &qed_drain),
INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop),
INIT_STRUCT_FIELD(remove, &qed_remove),
+ INIT_STRUCT_FIELD(send_drv_state, &qed_send_drv_state),
};
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index a34b6659..baea1bb0 100644
--- a/drivers/net/qede/qede_rxtx.c
+++ b/drivers/net/qede/qede_rxtx.c
@@ -6,10 +6,9 @@
* See LICENSE.qede_pmd for copyright and licensing details.
*/
+#include <rte_net.h>
#include "qede_rxtx.h"
-static bool gro_disable = 1; /* mod_param */
-
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
struct rte_mbuf *new_mb = NULL;
@@ -69,7 +68,7 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
unsigned int i;
- PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);
+ PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs", txq->nb_tx_desc);
if (txq->sw_tx_ring) {
for (i = 0; i < txq->nb_tx_desc; i++) {
@@ -84,16 +83,16 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct qede_dev *qdev = dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct rte_eth_dev_data *eth_data = dev->data;
+ struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t max_rx_pkt_len;
+ uint16_t bufsz;
size_t size;
- uint16_t data_size;
int rc;
int i;
@@ -127,34 +126,27 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->nb_rx_desc = nb_desc;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
-
- /* Sanity check */
- data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) -
- RTE_PKTMBUF_HEADROOM;
-
- if (pkt_len > data_size && !dev->data->scattered_rx) {
- DP_ERR(edev, "MTU %u should not exceed dataroom %u\n",
- pkt_len, data_size);
- rte_free(rxq);
- return -EINVAL;
+ max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ qdev->mtu = max_rx_pkt_len;
+
+ /* Fix up RX buffer size */
+ bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ if ((rxmode->enable_scatter) ||
+ (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ if (!dev->data->scattered_rx) {
+ DP_INFO(edev, "Forcing scatter-gather mode\n");
+ dev->data->scattered_rx = 1;
+ }
}
-
if (dev->data->scattered_rx)
- rxq->rx_buf_size = data_size;
+ rxq->rx_buf_size = bufsz + QEDE_ETH_OVERHEAD;
else
- rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD;
+ rxq->rx_buf_size = qdev->mtu + QEDE_ETH_OVERHEAD;
+ /* Align to cache-line size if needed */
+ rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);
- qdev->mtu = pkt_len;
-
- DP_INFO(edev, "MTU = %u ; RX buffer = %u\n",
- qdev->mtu, rxq->rx_buf_size);
-
- if (pkt_len > ETHER_MAX_LEN) {
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
- DP_NOTICE(edev, false, "jumbo frame enabled\n");
- } else {
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
- }
+ DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
+ qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
/* Allocate the parallel driver ring for Rx buffers */
size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
@@ -176,7 +168,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
ECORE_CHAIN_CNT_TYPE_U16,
rxq->nb_rx_desc,
sizeof(struct eth_rx_bd),
- &rxq->rx_bd_ring);
+ &rxq->rx_bd_ring,
+ NULL);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(edev, false,
@@ -196,7 +189,8 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
ECORE_CHAIN_CNT_TYPE_U16,
rxq->nb_rx_desc,
sizeof(union eth_rx_cqe),
- &rxq->rx_comp_ring);
+ &rxq->rx_comp_ring,
+ NULL);
if (rc != ECORE_SUCCESS) {
DP_NOTICE(edev, false,
@@ -291,7 +285,8 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
ECORE_CHAIN_CNT_TYPE_U16,
txq->nb_tx_desc,
sizeof(union eth_tx_bd_types),
- &txq->tx_pbl);
+ &txq->tx_pbl,
+ NULL);
if (rc != ECORE_SUCCESS) {
DP_ERR(edev,
"Unable to allocate memory for txbd ring on socket %u",
@@ -335,8 +330,8 @@ qede_tx_queue_setup(struct rte_eth_dev *dev,
static void qede_init_fp(struct qede_dev *qdev)
{
struct qede_fastpath *fp;
- uint8_t i, rss_id, tc;
- int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;
+ uint8_t i;
+ int fp_rx = qdev->fp_num_rx;
memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
sizeof(*qdev->fp_array)));
@@ -356,7 +351,6 @@ static void qede_init_fp(struct qede_dev *qdev)
snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
}
- qdev->gro_disable = gro_disable;
}
void qede_free_fp_arrays(struct qede_dev *qdev)
@@ -373,11 +367,9 @@ void qede_free_fp_arrays(struct qede_dev *qdev)
}
}
-int qede_alloc_fp_array(struct qede_dev *qdev)
+static int qede_alloc_fp_array(struct qede_dev *qdev)
{
- struct qede_fastpath *fp;
struct ecore_dev *edev = &qdev->edev;
- int i;
qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
sizeof(*qdev->fp_array),
@@ -483,7 +475,8 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
}
static inline void
-qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+qede_update_rx_prod(__rte_unused struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
{
uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
@@ -510,104 +503,63 @@ qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
*/
rte_wmb();
- PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
-}
-
-static inline uint32_t
-qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
-{
- return index % n_rx_rings;
-}
-
-static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
-{
- unsigned int i;
-
- srand((unsigned int)time(NULL));
-
- for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
- buff[i] = rand();
+ PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
}
-static bool
-qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev,
- struct qed_update_vport_rss_params *rss_params)
+static void
+qede_update_sge_tpa_params(struct ecore_sge_tpa_params *sge_tpa_params,
+ uint16_t mtu, bool enable)
{
- struct rte_eth_rss_conf rss_conf;
- enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
- struct qede_dev *qdev = eth_dev->data->dev_private;
- struct ecore_dev *edev = &qdev->edev;
- uint8_t rss_caps;
- unsigned int i;
- uint64_t hf;
- uint32_t *key;
-
- PMD_INIT_FUNC_TRACE(edev);
-
- rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
- key = (uint32_t *)rss_conf.rss_key;
- hf = rss_conf.rss_hf;
-
- /* Check if RSS conditions are met.
- * Note: Even though its meaningless to enable RSS with one queue, it
- * could be used to produce RSS Hash, so skipping that check.
+ /* Enable LRO in split mode */
+ sge_tpa_params->tpa_ipv4_en_flg = enable;
+ sge_tpa_params->tpa_ipv6_en_flg = enable;
+ sge_tpa_params->tpa_ipv4_tunn_en_flg = false;
+ sge_tpa_params->tpa_ipv6_tunn_en_flg = false;
+ /* set if tpa enable changes */
+ sge_tpa_params->update_tpa_en_flg = 1;
+ /* set if tpa parameters should be handled */
+ sge_tpa_params->update_tpa_param_flg = enable;
+
+ sge_tpa_params->max_buffers_per_cqe = 20;
+ /* Enable TPA in split mode. In this mode each TPA segment
+ * starts on a new BD, so there is one BD per segment.
*/
- if (!(mode & ETH_MQ_RX_RSS)) {
- DP_INFO(edev, "RSS flag is not set\n");
- return false;
- }
-
- if (hf == 0) {
- DP_INFO(edev, "Request to disable RSS\n");
- return false;
- }
-
- memset(rss_params, 0, sizeof(*rss_params));
-
- for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
- rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
- QEDE_RSS_COUNT(qdev));
-
- if (!key)
- qede_prandom_bytes(rss_params->rss_key,
- sizeof(rss_params->rss_key));
- else
- memcpy(rss_params->rss_key, rss_conf.rss_key,
- rss_conf.rss_key_len);
-
- qede_init_rss_caps(&rss_caps, hf);
-
- rss_params->rss_caps = rss_caps;
-
- DP_INFO(edev, "RSS conditions are met\n");
-
- return true;
+ sge_tpa_params->tpa_pkt_split_flg = 1;
+ sge_tpa_params->tpa_hdr_data_split_flg = 0;
+ sge_tpa_params->tpa_gro_consistent_flg = 0;
+ sge_tpa_params->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ sge_tpa_params->tpa_max_size = 0x7FFF;
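+ /* Start/continue aggregation only for packets of at least half the MTU */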
+ sge_tpa_params->tpa_min_size_to_start = mtu / 2;
+ sge_tpa_params->tpa_min_size_to_cont = mtu / 2;
}
-static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
+static int qede_start_queues(struct rte_eth_dev *eth_dev,
+ __rte_unused bool clear_stats)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
struct ecore_queue_start_common_params q_params;
- struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
struct qed_dev_info *qed_info = &qdev->dev_info.common;
struct qed_update_vport_params vport_update_params;
+ struct ecore_sge_tpa_params tpa_params;
struct qede_tx_queue *txq;
struct qede_fastpath *fp;
dma_addr_t p_phys_table;
int txq_index;
uint16_t page_cnt;
- int vlan_removal_en = 1;
int rc, tc, i;
for_each_queue(i) {
fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_RX) {
- p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
- rx_comp_ring);
- page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
- rx_comp_ring);
+ struct ecore_rxq_start_ret_params ret_params;
+
+ p_phys_table =
+ ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
+ page_cnt =
+ ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
+ memset(&ret_params, 0, sizeof(ret_params));
memset(&q_params, 0, sizeof(q_params));
q_params.queue_id = i;
q_params.vport_id = 0;
@@ -621,13 +573,17 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
fp->rxq->rx_bd_ring.p_phys_addr,
p_phys_table,
page_cnt,
- &fp->rxq->hw_rxq_prod_addr);
+ &ret_params);
if (rc) {
DP_ERR(edev, "Start rxq #%d failed %d\n",
fp->rxq->queue_id, rc);
return rc;
}
+ /* Use the return parameters */
+ fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ fp->rxq->handle = ret_params.p_handle;
+
fp->rxq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[RX_PI];
@@ -637,6 +593,8 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
if (!(fp->type & QEDE_FASTPATH_TX))
continue;
for (tc = 0; tc < qdev->num_tc; tc++) {
+ struct ecore_txq_start_ret_params ret_params;
+
txq = fp->txqs[tc];
txq_index = tc * QEDE_RSS_COUNT(qdev) + i;
@@ -644,6 +602,7 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
memset(&q_params, 0, sizeof(q_params));
+ memset(&ret_params, 0, sizeof(ret_params));
q_params.queue_id = txq->queue_id;
q_params.vport_id = 0;
q_params.sb = fp->sb_info->igu_sb_id;
@@ -652,13 +611,16 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
rc = qdev->ops->q_tx_start(edev, i, &q_params,
p_phys_table,
page_cnt, /* **pp_doorbell */
- &txq->doorbell_addr);
+ &ret_params);
if (rc) {
DP_ERR(edev, "Start txq %u failed %d\n",
txq_index, rc);
return rc;
}
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
txq->hw_cons_ptr =
&fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
SET_FIELD(txq->tx_db.data.params,
@@ -688,16 +650,14 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
vport_update_params.tx_switching_flg = 1;
}
- if (qede_check_vport_rss_enable(eth_dev, rss_params)) {
- vport_update_params.update_rss_flg = 1;
- qdev->rss_enabled = 1;
- } else {
- qdev->rss_enabled = 0;
+ /* TPA */
+ if (qdev->enable_lro) {
+ DP_INFO(edev, "Enabling LRO\n");
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, true);
+ vport_update_params.sge_tpa_params = &tpa_params;
}
- rte_memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
-
rc = qdev->ops->vport_update(edev, &vport_update_params);
if (rc) {
DP_ERR(edev, "Update V-PORT failed %d\n", rc);
@@ -707,79 +667,91 @@ static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
return 0;
}
-#ifdef ENC_SUPPORTED
static bool qede_tunn_exist(uint16_t flag)
{
return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}
-static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+/*
+ * qede_check_tunn_csum_l4:
+ * Returns:
+ * 1 : If L4 csum is enabled AND if the validation has failed.
+ * 0 : Otherwise
+ */
+static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
{
- uint8_t tcsum = 0;
- uint16_t csum_flag = 0;
-
if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
- csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
-
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
- csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
- tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
- }
-
- csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
- PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
- if (csum_flag & flag)
- return QEDE_CSUM_ERROR;
+ return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
- return QEDE_CSUM_UNNECESSARY | tcsum;
-}
-#else
-static inline uint8_t qede_tunn_exist(uint16_t flag)
-{
return 0;
}
-static inline uint8_t qede_check_tunn_csum(uint16_t flag)
+static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
{
+ if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
+ return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
+
return 0;
}
-#endif
-static inline uint8_t qede_check_notunn_csum(uint16_t flag)
+static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
- uint8_t csum = 0;
- uint16_t csum_flag = 0;
-
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
- csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
- csum = QEDE_CSUM_UNNECESSARY;
- }
-
- csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
- PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
-
- if (csum_flag & flag)
- return QEDE_CSUM_ERROR;
-
- return csum;
+ uint16_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+ };
+
+ /* Bits (0..3) provide the L3/L4 protocol type */
+ val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
+ PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
+ (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
+ PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_MAX)
+ return ptype_lkup_tbl[val] | RTE_PTYPE_L2_ETHER;
+ else
+ return RTE_PTYPE_UNKNOWN;
}
-static inline uint8_t qede_check_csum(uint16_t flag)
+static inline uint8_t
+qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
{
- if (likely(!qede_tunn_exist(flag)))
- return qede_check_notunn_csum(flag);
- else
- return qede_check_tunn_csum(flag);
+ struct ipv4_hdr *ip;
+ uint16_t pkt_csum;
+ uint16_t calc_csum;
+ uint16_t val;
+
+ val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
+
+ if (unlikely(val)) {
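+ /* HW reported an IP header error; re-verify the IPv4 csum in software */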
+ m->packet_type = qede_rx_cqe_to_pkt_type(flag);
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
+ ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
+ sizeof(struct ether_hdr));
+ pkt_csum = ip->hdr_checksum;
+ ip->hdr_checksum = 0;
+ calc_csum = rte_ipv4_cksum(ip);
+ ip->hdr_checksum = pkt_csum;
+ return (calc_csum != pkt_csum);
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+ return 1;
+ }
+ }
+ return 0;
}
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
@@ -789,7 +761,7 @@ static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
}
static inline void
-qede_reuse_page(struct qede_dev *qdev,
+qede_reuse_page(__rte_unused struct qede_dev *qdev,
struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
{
struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
@@ -822,56 +794,153 @@ qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
}
}
-static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
+static inline void
+qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ uint8_t agg_index, uint16_t len)
+{
+ struct qede_agg_info *tpa_info;
+ struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
+ uint16_t cons_idx;
+
+ /* Under certain conditions the FW may not consume an additional or
+ * new BD, so the decision to consume the BD must be made based on
+ * len_list[0].
+ */
+ if (rte_le_to_cpu_16(len)) {
+ tpa_info = &rxq->tpa_info[agg_index];
+ cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ assert(curr_frag);
+ curr_frag->nb_segs = 1;
+ curr_frag->pkt_len = rte_le_to_cpu_16(len);
+ curr_frag->data_len = curr_frag->pkt_len;
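+ /* Append this segment to the aggregation chain */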
+ tpa_info->tpa_tail->next = curr_frag;
+ tpa_info->tpa_tail = curr_frag;
+ qede_rx_bd_ring_consume(rxq);
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ }
+ }
+}
+
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
- uint32_t p_type;
- /* TBD - L4 indications needed ? */
- uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
- PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);
-
- /* protocol = 3 means LLC/SNAP over Ethernet */
- if (unlikely(protocol == 0 || protocol == 3))
- p_type = RTE_PTYPE_UNKNOWN;
- else if (protocol == 1)
- p_type = RTE_PTYPE_L3_IPV4;
- else if (protocol == 2)
- p_type = RTE_PTYPE_L3_IPV6;
-
- return RTE_PTYPE_L2_ETHER | p_type;
+ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+ cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+ /* Only len_list[0] will have a value */
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
}
-int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
- int num_segs, uint16_t pkt_len)
+static inline void
+qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
+
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+ /* Update total length and frags based on end TPA */
+ rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+ /* TODO: Add Sanity Checks */
+ rx_mb->nb_segs = cqe->num_of_bds;
+ rx_mb->pkt_len = cqe->total_packet_len;
+
+ PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+ " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+ rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+ rx_mb->pkt_len);
+}
+
+static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
+{
+ uint32_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
+ [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L2_ETHER,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ };
+
+ /* Cover bits[4-0] to include tunn_type and next protocol */
+ val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
+ (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
+ return ptype_tunn_lkup_tbl[val];
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
+static inline int
+qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ uint8_t num_segs, uint16_t pkt_len)
{
struct qede_rx_queue *rxq = p_rxq;
struct qede_dev *qdev = rxq->qdev;
- struct ecore_dev *edev = &qdev->edev;
- uint16_t sw_rx_index, cur_size;
-
register struct rte_mbuf *seg1 = NULL;
register struct rte_mbuf *seg2 = NULL;
+ uint16_t sw_rx_index;
+ uint16_t cur_size;
seg1 = rx_mb;
while (num_segs) {
- cur_size = pkt_len > rxq->rx_buf_size ?
- rxq->rx_buf_size : pkt_len;
- if (!cur_size) {
- PMD_RX_LOG(DEBUG, rxq,
- "SG packet, len and num BD mismatch\n");
+ cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+ if (unlikely(!cur_size)) {
+ PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
+ " left for mapping jumbo", num_segs);
qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
return -EINVAL;
}
-
- if (qede_alloc_rx_buffer(rxq)) {
- uint8_t index;
-
- PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
- index = rxq->port_id;
- rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
- rxq->rx_alloc_errors++;
- return -ENOMEM;
- }
-
sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
qede_rx_bd_ring_consume(rxq);
@@ -881,16 +950,9 @@ int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
seg1 = seg1->next;
num_segs--;
rxq->rx_segs++;
- continue;
}
- seg1 = NULL;
-
- if (pkt_len)
- PMD_RX_LOG(DEBUG, rxq,
- "Mapped all BDs of jumbo, but still have %d bytes\n",
- pkt_len);
- return ECORE_SUCCESS;
+ return 0;
}
uint16_t
@@ -903,14 +965,29 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
uint16_t rx_pkt = 0;
union eth_rx_cqe *cqe;
- struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
register struct rte_mbuf *rx_mb = NULL;
register struct rte_mbuf *seg1 = NULL;
enum eth_rx_cqe_type cqe_type;
- uint16_t len, pad, preload_idx, pkt_len, parse_flag;
- uint8_t csum_flag, num_segs;
+ uint16_t pkt_len = 0; /* Sum of all BD segments */
+ uint16_t len; /* Length of first BD */
+ uint8_t num_segs = 1;
+ uint16_t preload_idx;
+ uint16_t parse_flag;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ uint8_t bitfield_val;
enum rss_hash_type htype;
- int ret;
+#endif
+ uint8_t tunn_parse_flag;
+ uint8_t j;
+ struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
+ uint64_t ol_flags;
+ uint32_t packet_type;
+ uint16_t vlan_tci;
+ bool tpa_start_flg;
+ uint8_t offset, tpa_agg_idx, flags;
+ struct qede_agg_info *tpa_info = NULL;
+ uint32_t rss_hash;
hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
@@ -921,16 +998,59 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
return 0;
while (sw_comp_cons != hw_comp_cons) {
+ ol_flags = 0;
+ packet_type = RTE_PTYPE_UNKNOWN;
+ vlan_tci = 0;
+ tpa_start_flg = false;
+ rss_hash = 0;
+
/* Get the CQE from the completion ring */
cqe =
(union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
cqe_type = cqe->fast_path_regular.type;
-
- if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
- PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n");
-
+ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ fp_cqe = &cqe->fast_path_regular;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_START:
+ cqe_start_tpa = &cqe->fast_path_tpa_start;
+ tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
+ tpa_start_flg = true;
+ /* Mark it as LRO packet */
+ ol_flags |= PKT_RX_LRO;
+ /* In split mode, seg_len is the same as len_on_first_bd
+ * and ext_bd_len_list will be empty since there are
+ * no additional buffers
+ */
+ PMD_RX_LOG(INFO, rxq,
+ "TPA start[%d] - len_on_first_bd %d header %d"
+ " [bd_list[0] %d], [seg_len %d]\n",
+ cqe_start_tpa->tpa_agg_index,
+ rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+ cqe_start_tpa->header_len,
+ rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
+ rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+
+ break;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_rx_process_tpa_cont_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
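+ /* Aggregation is complete; deliver the chained mbuf via tpa_head */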
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_rx_process_tpa_end_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_end);
+ tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
+ tpa_info = &rxq->tpa_info[tpa_agg_idx];
+ rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
+ goto tpa_end;
+ case ETH_RX_CQE_TYPE_SLOW_PATH:
+ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
qdev->ops->eth_cqe_completion(edev, fp->id,
(struct eth_slow_path_rx_cqe *)cqe);
+ /* fall-thru */
+ default:
goto next_cqe;
}
@@ -939,32 +1059,96 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
assert(rx_mb != NULL);
- /* non GRO */
- fp_cqe = &cqe->fast_path_regular;
-
- len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
- pad = fp_cqe->placement_offset;
- assert((len + pad) <= rx_mb->buf_len);
-
- PMD_RX_LOG(DEBUG, rxq,
- "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
- " len = %u, parsing_flags = %d\n",
- cqe_type, fp_cqe->bitfields,
- rte_le_to_cpu_16(fp_cqe->vlan_tag),
- len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));
-
- /* If this is an error packet then drop it */
- parse_flag =
- rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
- csum_flag = qede_check_csum(parse_flag);
- if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
- PMD_RX_LOG(ERR, rxq,
- "CQE in CONS = %u has error, flags = 0x%x "
- "dropping incoming packet\n",
- sw_comp_cons, parse_flag);
- rxq->rx_hw_errors++;
- qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
- goto next_cqe;
+ /* Handle regular CQE or TPA start CQE */
+ if (!tpa_start_flg) {
+ parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+ offset = fp_cqe->placement_offset;
+ len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = fp_cqe->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+#endif
+ } else {
+ parse_flag =
+ rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
+ offset = cqe_start_tpa->placement_offset;
+ /* seg_len = len_on_first_bd */
+ len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
+ vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = cqe_start_tpa->bitfields;
+ htype = (uint8_t)GET_FIELD(bitfield_val,
+ ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE);
+#endif
+ rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
+ }
+ if (qede_tunn_exist(parse_flag)) {
+ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+ if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (tpa_start_flg)
+ flags =
+ cqe_start_tpa->tunnel_pars_flags.flags;
+ else
+ flags = fp_cqe->tunnel_pars_flags.flags;
+ tunn_parse_flag = flags;
+ packet_type =
+ qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+ }
+ } else {
+ PMD_RX_LOG(INFO, rxq, "Rx non-tunneled packet\n");
+ if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ if (unlikely(qede_check_notunn_csum_l3(rx_mb,
+ parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "IP csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ packet_type =
+ qede_rx_cqe_to_pkt_type(parse_flag);
+ }
+ }
+
+ if (CQE_HAS_VLAN(parse_flag)) {
+ ol_flags |= PKT_RX_VLAN_PKT;
+ if (qdev->vlan_strip_flg) {
+ ol_flags |= PKT_RX_VLAN_STRIPPED;
+ rx_mb->vlan_tci = vlan_tci;
+ }
+ }
+ if (CQE_HAS_OUTER_VLAN(parse_flag)) {
+ ol_flags |= PKT_RX_QINQ_PKT;
+ if (qdev->vlan_strip_flg) {
+ rx_mb->vlan_tci = vlan_tci;
+ ol_flags |= PKT_RX_QINQ_STRIPPED;
+ }
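+ /* FW does not provide the outer VLAN tag, which is always stripped */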
+ rx_mb->vlan_tci_outer = 0;
+ }
+ /* RSS Hash */
+ if (qdev->rss_enable) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rx_mb->hash.rss = rss_hash;
}
if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
@@ -977,73 +1161,66 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->rx_alloc_errors++;
break;
}
-
qede_rx_bd_ring_consume(rxq);
- if (fp_cqe->bd_num > 1) {
- pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ if (!tpa_start_flg && fp_cqe->bd_num > 1) {
+ PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
+ " len on first: %04x Total Len: %04x",
+ fp_cqe->bd_num, len, pkt_len);
num_segs = fp_cqe->bd_num - 1;
-
- rxq->rx_segs++;
-
- pkt_len -= len;
seg1 = rx_mb;
- ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
- pkt_len);
- if (ret != ECORE_SUCCESS) {
- qede_recycle_rx_bd_ring(rxq, qdev,
- fp_cqe->bd_num);
+ if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
+ pkt_len - len))
goto next_cqe;
+ for (j = 0; j < num_segs; j++) {
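+ /* Replenish an RX buffer for each extra BD consumed */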
+ if (qede_alloc_rx_buffer(rxq)) {
+ PMD_RX_LOG(ERR, rxq,
+ "Buffer allocation failed");
+ rte_eth_devices[rxq->port_id].
+ data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ break;
+ }
+ rxq->rx_segs++;
}
}
+ rxq->rx_segs++; /* for the first segment */
/* Prefetch next mbuf while processing current one. */
preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
- /* Update MBUF fields */
- rx_mb->ol_flags = 0;
- rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
- rx_mb->nb_segs = fp_cqe->bd_num;
- rx_mb->data_len = len;
- rx_mb->pkt_len = fp_cqe->pkt_len;
+ /* Update rest of the MBUF fields */
+ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
rx_mb->port = rxq->port_id;
- rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);
-
- htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
- ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
- if (qdev->rss_enabled && htype) {
- rx_mb->ol_flags |= PKT_RX_RSS_HASH;
- rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
- PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
- rx_mb->hash.rss);
+ rx_mb->ol_flags = ol_flags;
+ rx_mb->data_len = len;
+ rx_mb->packet_type = packet_type;
+ PMD_RX_LOG(INFO, rxq,
+ "pkt_type 0x%04x len %u hash_type %d hash_val 0x%x"
+ " ol_flags 0x%04lx\n",
+ packet_type, len, htype, rx_mb->hash.rss,
+ (unsigned long)ol_flags);
+ if (!tpa_start_flg) {
+ rx_mb->nb_segs = fp_cqe->bd_num;
+ rx_mb->pkt_len = pkt_len;
+ } else {
+ /* store ref to the updated mbuf */
+ tpa_info->tpa_head = rx_mb;
+ tpa_info->tpa_tail = tpa_info->tpa_head;
}
-
rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
-
- if (CQE_HAS_VLAN(parse_flag)) {
- rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
- rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
+tpa_end:
+ if (!tpa_start_flg) {
+ rx_pkts[rx_pkt] = rx_mb;
+ rx_pkt++;
}
-
- if (CQE_HAS_OUTER_VLAN(parse_flag)) {
- /* FW does not provide indication of Outer VLAN tag,
- * which is always stripped, so vlan_tci_outer is set
- * to 0. Here vlan_tag represents inner VLAN tag.
- */
- rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
- rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
- rx_mb->vlan_tci_outer = 0;
- }
-
- rx_pkts[rx_pkt] = rx_mb;
- rx_pkt++;
next_cqe:
ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
if (rx_pkt == nb_pkts) {
PMD_RX_LOG(DEBUG, rxq,
- "Budget reached nb_pkts=%u received=%u\n",
+ "Budget reached nb_pkts=%u received=%u",
rx_pkt, nb_pkts);
break;
}
@@ -1053,108 +1230,100 @@ next_cqe:
rxq->rcv_pkts += rx_pkt;
- PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
+ PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
return rx_pkt;
}
-static inline int
-qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
+static inline void
+qede_free_tx_pkt(struct qede_tx_queue *txq)
{
- uint16_t nb_segs, idx = TX_CONS(txq);
- struct eth_tx_bd *tx_data_bd;
- struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;
-
- if (unlikely(!mbuf)) {
- PMD_TX_LOG(ERR, txq, "null mbuf\n");
- PMD_TX_LOG(ERR, txq,
- "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
- txq->nb_tx_desc, txq->nb_tx_avail, idx,
- TX_PROD(txq));
- return -1;
- }
+ struct rte_mbuf *mbuf;
+ uint16_t nb_segs;
+ uint16_t idx;
- nb_segs = mbuf->nb_segs;
- while (nb_segs) {
- /* It's like consuming rxbuf in recv() */
+ idx = TX_CONS(txq);
+ mbuf = txq->sw_tx_ring[idx].mbuf;
+ if (mbuf) {
+ nb_segs = mbuf->nb_segs;
+ PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
+ while (nb_segs) {
+ /* It's like consuming rxbuf in recv() */
+ ecore_chain_consume(&txq->tx_pbl);
+ txq->nb_tx_avail++;
+ nb_segs--;
+ }
+ rte_pktmbuf_free(mbuf);
+ txq->sw_tx_ring[idx].mbuf = NULL;
+ txq->sw_tx_cons++;
+ PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
+ } else {
ecore_chain_consume(&txq->tx_pbl);
txq->nb_tx_avail++;
- nb_segs--;
}
- rte_pktmbuf_free(mbuf);
- txq->sw_tx_ring[idx].mbuf = NULL;
-
- return 0;
}
-static inline uint16_t
-qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
+static inline void
+qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
+ struct qede_tx_queue *txq)
{
- uint16_t tx_compl = 0;
uint16_t hw_bd_cons;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ uint16_t sw_tx_cons;
+#endif
- hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
rte_compiler_barrier();
-
- while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
- if (qede_free_tx_pkt(edev, txq)) {
- PMD_TX_LOG(ERR, txq,
- "hw_bd_cons = %u, chain_cons = %u\n",
- hw_bd_cons,
- ecore_chain_get_cons_idx(&txq->tx_pbl));
- break;
- }
- txq->sw_tx_cons++; /* Making TXD available */
- tx_compl++;
- }
-
- PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
- tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
- return tx_compl;
+ hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
+ PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
+ abs(hw_bd_cons - sw_tx_cons));
+#endif
+ while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
+ qede_free_tx_pkt(txq);
}
/* Populate scatter gather buffer descriptor fields */
-static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
- struct rte_mbuf *m_seg,
- uint16_t count,
- struct eth_tx_1st_bd *bd1)
+static inline uint8_t
+qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
+ struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3)
{
struct qede_tx_queue *txq = p_txq;
- struct eth_tx_2nd_bd *bd2 = NULL;
- struct eth_tx_3rd_bd *bd3 = NULL;
struct eth_tx_bd *tx_bd = NULL;
- uint16_t nb_segs = count;
dma_addr_t mapping;
+ uint8_t nb_segs = 0;
/* Check for scattered buffers */
while (m_seg) {
- if (nb_segs == 1) {
- bd2 = (struct eth_tx_2nd_bd *)
- ecore_chain_produce(&txq->tx_pbl);
- memset(bd2, 0, sizeof(*bd2));
+ if (nb_segs == 0) {
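+ /* BD2 may already be allocated by the caller (LSO/IPv6 ext) */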
+ if (!*bd2) {
+ *bd2 = (struct eth_tx_2nd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nb_segs++;
+ }
mapping = rte_mbuf_data_dma_addr(m_seg);
- bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
- } else if (nb_segs == 2) {
- bd3 = (struct eth_tx_3rd_bd *)
- ecore_chain_produce(&txq->tx_pbl);
- memset(bd3, 0, sizeof(*bd3));
+ QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
+ } else if (nb_segs == 1) {
+ if (!*bd3) {
+ *bd3 = (struct eth_tx_3rd_bd *)
+ ecore_chain_produce(&txq->tx_pbl);
+ memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nb_segs++;
+ }
mapping = rte_mbuf_data_dma_addr(m_seg);
- bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
} else {
tx_bd = (struct eth_tx_bd *)
ecore_chain_produce(&txq->tx_pbl);
memset(tx_bd, 0, sizeof(*tx_bd));
+ nb_segs++;
mapping = rte_mbuf_data_dma_addr(m_seg);
- tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
- tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
- tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);
+ QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
+ PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
}
- nb_segs++;
- bd1->data.nbds = nb_segs;
m_seg = m_seg->next;
}
@@ -1162,88 +1331,314 @@ static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
return nb_segs;
}
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+static inline void
+print_tx_bd_info(struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *bd1,
+ struct eth_tx_2nd_bd *bd2,
+ struct eth_tx_3rd_bd *bd3,
+ uint64_t tx_ol_flags)
+{
+ char ol_buf[256] = { 0 }; /* for verbose prints */
+
+ if (bd1)
+ PMD_TX_LOG(INFO, txq,
+ "BD1: nbytes=%u nbds=%u bd_flags=04%x bf=%04x",
+ rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
+ bd1->data.bd_flags.bitfields,
+ rte_cpu_to_le_16(bd1->data.bitfields));
+ if (bd2)
+ PMD_TX_LOG(INFO, txq,
+ "BD2: nbytes=%u bf=%04x\n",
+ rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1);
+ if (bd3)
+ PMD_TX_LOG(INFO, txq,
+ "BD3: nbytes=%u bf=%04x mss=%u\n",
+ rte_cpu_to_le_16(bd3->nbytes),
+ rte_cpu_to_le_16(bd3->data.bitfields),
+ rte_cpu_to_le_16(bd3->data.lso_mss));
+
+ rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
+ PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
+}
+#endif
+
+/* TX prepare to check that packets meet TX conditions */
+uint16_t
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct qede_tx_queue *txq = p_txq;
+#else
+qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+#endif
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+ uint16_t i;
+ int ret;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ /* TBD: confirm it's ~9700B for both? */
+ if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ } else {
+ if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
+ rte_errno = -EINVAL;
+ break;
+ }
+ }
+ if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = -ENOTSUP;
+ break;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+#endif
+ /* TBD: pseudo csum calculation required iff
+ * ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE not set?
+ */
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ break;
+ }
+ }
+
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ if (unlikely(i != nb_pkts))
+ PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
+ nb_pkts - i);
+#endif
+ return i;
+}
+
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct qede_tx_queue *txq = p_txq;
struct qede_dev *qdev = txq->qdev;
struct ecore_dev *edev = &qdev->edev;
- struct qede_fastpath *fp;
- struct eth_tx_1st_bd *bd1;
+ struct rte_mbuf *mbuf;
struct rte_mbuf *m_seg = NULL;
uint16_t nb_tx_pkts;
- uint16_t nb_pkt_sent = 0;
uint16_t bd_prod;
uint16_t idx;
- uint16_t tx_count;
- uint16_t nb_segs = 0;
-
- fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];
+ uint16_t nb_frags;
+ uint16_t nb_pkt_sent = 0;
+ uint8_t nbds;
+ bool ipv6_ext_flg;
+ bool lso_flg;
+ bool tunn_flg;
+ struct eth_tx_1st_bd *bd1;
+ struct eth_tx_2nd_bd *bd2;
+ struct eth_tx_3rd_bd *bd3;
+ uint64_t tx_ol_flags;
+ uint16_t hdr_size;
if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
- PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
+ PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
- (void)qede_process_tx_compl(edev, txq);
- }
-
- nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
- ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
- if (unlikely(nb_tx_pkts == 0)) {
- PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
- nb_pkts, txq->nb_tx_avail);
- return 0;
+ qede_process_tx_compl(edev, txq);
}
- tx_count = nb_tx_pkts;
+ nb_tx_pkts = nb_pkts;
+ bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
while (nb_tx_pkts--) {
+ /* Init flags/values */
+ ipv6_ext_flg = false;
+ tunn_flg = false;
+ lso_flg = false;
+ nbds = 0;
+ bd1 = NULL;
+ bd2 = NULL;
+ bd3 = NULL;
+ hdr_size = 0;
+
+ mbuf = *tx_pkts++;
+ assert(mbuf);
+
+ /* Check minimum TX BDS availability against available BDs */
+ if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
+ break;
+
+ tx_ol_flags = mbuf->ol_flags;
+
+#define RTE_ETH_IS_IPV6_HDR_EXT(ptype) ((ptype) & RTE_PTYPE_L3_IPV6_EXT)
+ if (RTE_ETH_IS_IPV6_HDR_EXT(mbuf->packet_type))
+ ipv6_ext_flg = true;
+
+ if (RTE_ETH_IS_TUNNEL_PKT(mbuf->packet_type))
+ tunn_flg = true;
+
+ if (tx_ol_flags & PKT_TX_TCP_SEG)
+ lso_flg = true;
+
+ if (lso_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_LSO_PKT))
+ break;
+ } else {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
+ break;
+ }
+
+ if (tunn_flg && ipv6_ext_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
+ break;
+ }
+ if (ipv6_ext_flg) {
+ if (unlikely(txq->nb_tx_avail <
+ ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT))
+ break;
+ }
+
/* Fill the entry in the SW ring and the BDs in the FW ring */
idx = TX_PROD(txq);
- struct rte_mbuf *mbuf = *tx_pkts++;
-
txq->sw_tx_ring[idx].mbuf = mbuf;
+
+ /* BD1 */
bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
- /* Zero init struct fields */
- bd1->data.bd_flags.bitfields = 0;
- bd1->data.bitfields = 0;
+ memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
+ nbds++;
- bd1->data.bd_flags.bitfields =
+ bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
- /* Map MBUF linear data for DMA and set in the first BD */
- QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
- mbuf->pkt_len);
+ /* FW 8.10.x specific change */
+ if (!lso_flg) {
+ bd1->data.bitfields |=
+ (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
+ << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ /* Map MBUF linear data for DMA and set in the BD1 */
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ mbuf->data_len);
+ } else {
+ /* For LSO, packet header and payload must reside on
+ * buffers pointed to by different BDs. Using BD1 for HDR
+ * and BD2 onwards for data.
+ */
+ hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
+ QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
+ hdr_size);
+ }
+
+ if (tunn_flg) {
+ /* First indicate its a tunnel pkt */
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
+ ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* Legacy FW had flipped behavior in regard to this bit,
+ * i.e. it needed to be set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ bd1->data.bitfields ^=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+ /* Outer IP checksum offload */
+ if (tx_ol_flags & PKT_TX_OUTER_IP_CKSUM) {
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ }
+
+ /* Outer UDP checksum offload */
+ bd1->data.bd_flags.bitfields |=
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
+ ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+ }
/* Descriptor based VLAN insertion */
- if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+ if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
}
+ if (lso_flg)
+ bd1->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
+
/* Offload the IP checksum in the hardware */
- if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
+ if ((lso_flg) || (tx_ol_flags & PKT_TX_IP_CKSUM))
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- }
/* L4 checksum offload (tcp or udp) */
- if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+ if ((lso_flg) || (tx_ol_flags & (PKT_TX_TCP_CKSUM |
+ PKT_TX_UDP_CKSUM)))
+ /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
bd1->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- /* IPv6 + extn. -> later */
+
+ /* BD2 */
+ if (lso_flg || ipv6_ext_flg) {
+ bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
+ nbds++;
+ QEDE_BD_SET_ADDR_LEN(bd2,
+ (hdr_size +
+ rte_mbuf_data_dma_addr(mbuf)),
+ mbuf->data_len - hdr_size);
+ /* TBD: check pseudo csum iff tx_prepare not called? */
+ if (ipv6_ext_flg) {
+ bd2->data.bitfields1 |=
+ ETH_L4_PSEUDO_CSUM_ZERO_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
+ }
+ }
+
+ /* BD3 */
+ if (lso_flg || ipv6_ext_flg) {
+ bd3 = (struct eth_tx_3rd_bd *)ecore_chain_produce
+ (&txq->tx_pbl);
+ memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
+ nbds++;
+ if (lso_flg) {
+ bd3->data.lso_mss =
+ rte_cpu_to_le_16(mbuf->tso_segsz);
+ /* Using one header BD */
+ bd3->data.bitfields |=
+ rte_cpu_to_le_16(1 <<
+ ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+ }
}
/* Handle fragmented MBUF */
m_seg = mbuf->next;
- nb_segs++;
- bd1->data.nbds = nb_segs;
/* Encode scatter gather buffer descriptors if required */
- nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
- txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;
- nb_segs = 0;
+ nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3);
+ bd1->data.nbds = nbds + nb_frags;
+ txq->nb_tx_avail -= bd1->data.nbds;
txq->sw_tx_prod++;
rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
bd_prod =
rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
+#ifdef RTE_LIBRTE_QEDE_DEBUG_TX
+ print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
+ PMD_TX_LOG(INFO, txq, "lso=%d tunn=%d ipv6_ext=%d\n",
+ lso_flg, tunn_flg, ipv6_ext_flg);
+#endif
nb_pkt_sent++;
txq->xmit_pkts++;
}
@@ -1252,14 +1647,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
txq->tx_db.data.bd_prod = bd_prod;
rte_wmb();
rte_compiler_barrier();
- DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
+ DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
rte_wmb();
/* Check again for Tx completions */
- (void)qede_process_tx_compl(edev, txq);
+ qede_process_tx_compl(edev, txq);
- PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
- nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
+ PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
+ nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
return nb_pkt_sent;
}
@@ -1268,7 +1663,7 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct qede_fastpath *fp;
- uint8_t i, rss_id, txq_index, tc;
+ uint8_t i, txq_index, tc;
int rxq = 0, txq = 0;
for_each_queue(i) {
@@ -1284,6 +1679,8 @@ static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
fp->txqs[tc] =
eth_dev->data->tx_queues[txq_index];
fp->txqs[tc]->queue_id = txq_index;
+ if (qdev->dev_info.is_legacy)
+ fp->txqs[tc]->is_legacy = true;
}
txq++;
}
@@ -1294,9 +1691,7 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
{
struct qede_dev *qdev = eth_dev->data->dev_private;
struct ecore_dev *edev = &qdev->edev;
- struct qed_link_output link_output;
- struct qede_fastpath *fp;
- int rc, i;
+ int rc;
DP_INFO(edev, "Device state is %d\n", qdev->state);
@@ -1315,13 +1710,18 @@ int qede_dev_start(struct rte_eth_dev *eth_dev)
return rc;
}
+ /* Newer SR-IOV PF driver expects RX/TX queues to be started before
+ * enabling RSS. Hence RSS configuration is deferred up to this point.
+ * Also, we would like to retain similar behavior in the PF case, so we
+ * don't do a PF/VF specific check here.
+ */
+ if (eth_dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ if (qede_config_rss(eth_dev))
+ return -1;
+
/* Bring-up the link */
qede_dev_set_link_state(eth_dev, true);
- /* Reset ring */
- if (qede_reset_fp_rings(qdev))
- return -ENOMEM;
-
/* Start/resume traffic */
qdev->ops->fastpath_start(edev);
@@ -1342,8 +1742,7 @@ static int qede_drain_txq(struct qede_dev *qdev,
qede_process_tx_compl(edev, txq);
if (!cnt) {
if (allow_drain) {
- DP_NOTICE(edev, false,
- "Tx queue[%u] is stuck,"
+ DP_ERR(edev, "Tx queue[%u] is stuck,"
"requesting MCP to drain\n",
txq->queue_id);
rc = qdev->ops->common->drain(edev);
@@ -1351,13 +1750,11 @@ static int qede_drain_txq(struct qede_dev *qdev,
return rc;
return qede_drain_txq(qdev, txq, false);
}
-
- DP_NOTICE(edev, false,
- "Timeout waiting for tx queue[%d]:"
+ DP_ERR(edev, "Timeout waiting for tx queue[%d]:"
"PROD=%d, CONS=%d\n",
txq->queue_id, txq->sw_tx_prod,
txq->sw_tx_cons);
- return -ENODEV;
+ return -1;
}
cnt--;
DELAY(1000);
@@ -1374,6 +1771,8 @@ static int qede_stop_queues(struct qede_dev *qdev)
{
struct qed_update_vport_params vport_update_params;
struct ecore_dev *edev = &qdev->edev;
+ struct ecore_sge_tpa_params tpa_params;
+ struct qede_fastpath *fp;
int rc, tc, i;
/* Disable the vport */
@@ -1382,9 +1781,15 @@ static int qede_stop_queues(struct qede_dev *qdev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 0;
vport_update_params.update_rss_flg = 0;
+ /* Disable TPA */
+ if (qdev->enable_lro) {
+ DP_INFO(edev, "Disabling LRO\n");
+ memset(&tpa_params, 0, sizeof(struct ecore_sge_tpa_params));
+ qede_update_sge_tpa_params(&tpa_params, qdev->mtu, false);
+ vport_update_params.sge_tpa_params = &tpa_params;
+ }
DP_INFO(edev, "Deactivate vport\n");
-
rc = qdev->ops->vport_update(edev, &vport_update_params);
if (rc) {
DP_ERR(edev, "Failed to update vport\n");
@@ -1395,7 +1800,7 @@ static int qede_stop_queues(struct qede_dev *qdev)
/* Flush Tx queues. If needed, request drain from MCP */
for_each_queue(i) {
- struct qede_fastpath *fp = &qdev->fp_array[i];
+ fp = &qdev->fp_array[i];
if (fp->type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
@@ -1410,23 +1815,17 @@ static int qede_stop_queues(struct qede_dev *qdev)
/* Stop all Queues in reverse order */
for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
- struct qed_stop_rxq_params rx_params;
+ fp = &qdev->fp_array[i];
/* Stop the Tx Queue(s) */
if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
for (tc = 0; tc < qdev->num_tc; tc++) {
- struct qed_stop_txq_params tx_params;
- u8 val;
-
- tx_params.rss_id = i;
- val = qdev->fp_array[i].txqs[tc]->queue_id;
- tx_params.tx_queue_id = val;
-
+ struct qede_tx_queue *txq = fp->txqs[tc];
DP_INFO(edev, "Stopping tx queues\n");
- rc = qdev->ops->q_tx_stop(edev, &tx_params);
+ rc = qdev->ops->q_tx_stop(edev, i, txq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop TXQ #%d\n",
- tx_params.tx_queue_id);
+ i);
return rc;
}
}
@@ -1434,20 +1833,15 @@ static int qede_stop_queues(struct qede_dev *qdev)
/* Stop the Rx Queue */
if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
- memset(&rx_params, 0, sizeof(rx_params));
- rx_params.rss_id = i;
- rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
- rx_params.eq_completion_only = 1;
-
DP_INFO(edev, "Stopping rx queues\n");
-
- rc = qdev->ops->q_rx_stop(edev, &rx_params);
+ rc = qdev->ops->q_rx_stop(edev, i, fp->rxq->handle);
if (rc) {
DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
return rc;
}
}
}
+ qede_reset_fp_rings(qdev);
return 0;
}
@@ -1506,10 +1900,14 @@ void qede_free_mem_load(struct rte_eth_dev *eth_dev)
for_each_queue(id) {
fp = &qdev->fp_array[id];
if (fp->type & QEDE_FASTPATH_RX) {
+ if (!fp->rxq)
+ continue;
qede_rx_queue_release(fp->rxq);
eth_dev->data->rx_queues[id] = NULL;
} else {
for (tc = 0; tc < qdev->num_tc; tc++) {
+ if (!fp->txqs[tc])
+ continue;
txq_idx = fp->txqs[tc]->queue_id;
qede_tx_queue_release(fp->txqs[tc]);
eth_dev->data->tx_queues[txq_idx] = NULL;
@@ -1544,3 +1942,11 @@ void qede_dev_stop(struct rte_eth_dev *eth_dev)
DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}
+
+uint16_t
+qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h
index ed9a529b..a1bbd256 100644
--- a/drivers/net/qede/qede_rxtx.h
+++ b/drivers/net/qede/qede_rxtx.h
@@ -41,10 +41,6 @@
(bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \
(bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \
(bd)->nbytes = rte_cpu_to_le_16(len); \
- /* FW 8.10.x specific change */ \
- (bd)->data.bitfields = ((len) & \
- ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) \
- << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; \
} while (0)
#define CQE_HAS_VLAN(flags) \
@@ -55,18 +51,29 @@
((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \
<< PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT))
+#define QEDE_MIN_RX_BUFF_SIZE (1024)
+#define QEDE_VLAN_TAG_SIZE (4)
+#define QEDE_LLC_SNAP_HDR_LEN (8)
+
/* Max supported alignment is 256 (8 shift)
* minimal alignment shift 6 is optimal for 57xxx HW performance
*/
#define QEDE_L1_CACHE_SHIFT 6
#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT)))
#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT)
-
-#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END)
-
-/* TBD: Excluding IPV6 */
-#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \
- ETH_RSS_NONFRAG_IPV4_UDP)
+#define QEDE_CEIL_TO_CACHE_LINE_SIZE(n) (((n) + (QEDE_FW_RX_ALIGN_END - 1)) & \
+ ~(QEDE_FW_RX_ALIGN_END - 1))
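+/* With QEDE_L1_CACHE_SHIFT == 6 this rounds n up to a multiple of 64 bytes */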
+/* Note: QEDE_LLC_SNAP_HDR_LEN is optional */
+#define QEDE_ETH_OVERHEAD ((ETHER_HDR_LEN) + ((2 * QEDE_VLAN_TAG_SIZE)) \
+ + (QEDE_LLC_SNAP_HDR_LEN))
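+/* e.g. 14 (Ethernet) + 2 * 4 (VLAN tags) + 8 (LLC/SNAP) = 30 bytes of overhead */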
+
+#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 |\
+ ETH_RSS_NONFRAG_IPV4_TCP |\
+ ETH_RSS_NONFRAG_IPV4_UDP |\
+ ETH_RSS_IPV6 |\
+ ETH_RSS_NONFRAG_IPV6_TCP |\
+ ETH_RSS_NONFRAG_IPV6_UDP |\
+ ETH_RSS_VXLAN)
#define QEDE_TXQ_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS)
@@ -74,6 +81,64 @@
#define for_each_queue(i) for (i = 0; i < qdev->num_queues; i++)
+
+/* Macros for non-tunnel packet types lkup table */
+#define QEDE_PKT_TYPE_UNKNOWN 0x0
+#define QEDE_PKT_TYPE_MAX 0xf
+#define QEDE_PKT_TYPE_IPV4 0x1
+#define QEDE_PKT_TYPE_IPV6 0x2
+#define QEDE_PKT_TYPE_IPV4_TCP 0x5
+#define QEDE_PKT_TYPE_IPV6_TCP 0x6
+#define QEDE_PKT_TYPE_IPV4_UDP 0x9
+#define QEDE_PKT_TYPE_IPV6_UDP 0xa
+
+/* Macros for tunneled packets with next protocol lkup table */
+#define QEDE_PKT_TYPE_TUNN_GENEVE 0x1
+#define QEDE_PKT_TYPE_TUNN_GRE 0x2
+#define QEDE_PKT_TYPE_TUNN_VXLAN 0x3
+
+/* Bit 2 is don't care bit */
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE 0x9
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE 0xa
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN 0xb
+
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE 0xd
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE 0xe
+#define QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN 0xf
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE 0x11
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE 0x12
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN 0x13
+
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE 0x15
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE 0x16
+#define QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN 0x17
+
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE 0x19
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE 0x1a
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN 0x1b
+
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE 0x1d
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE 0x1e
+#define QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN 0x1f
+
+#define QEDE_PKT_TYPE_TUNN_MAX_TYPE 0x20 /* 2^5 */
+
+#define QEDE_TX_CSUM_OFFLOAD_MASK (PKT_TX_IP_CKSUM | \
+ PKT_TX_TCP_CKSUM | \
+ PKT_TX_UDP_CKSUM | \
+ PKT_TX_OUTER_IP_CKSUM | \
+ PKT_TX_TCP_SEG)
+
+#define QEDE_TX_OFFLOAD_MASK (QEDE_TX_CSUM_OFFLOAD_MASK | \
+ PKT_TX_QINQ_PKT | \
+ PKT_TX_VLAN_PKT)
+
+#define QEDE_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ QEDE_TX_OFFLOAD_MASK)
+
/*
* RX BD descriptor ring
*/
@@ -83,6 +148,12 @@ struct qede_rx_entry {
/* allows expansion .. */
};
+/* TPA related structures */
+struct qede_agg_info {
+ struct rte_mbuf *tpa_head; /* Pointer to first TPA segment */
+ struct rte_mbuf *tpa_tail; /* Pointer to last TPA segment */
+};
+
/*
* Structure associated with each RX queue.
*/
@@ -103,7 +174,9 @@ struct qede_rx_queue {
uint64_t rx_segs;
uint64_t rx_hw_errors;
uint64_t rx_alloc_errors;
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
struct qede_dev *qdev;
+ void *handle;
};
/*
@@ -133,7 +206,9 @@ struct qede_tx_queue {
volatile union db_prod tx_db;
uint16_t port_id;
uint64_t xmit_pkts;
+ bool is_legacy;
struct qede_dev *qdev;
+ void *handle;
};
struct qede_fastpath {
@@ -177,9 +252,16 @@ void qede_free_mem_load(struct rte_eth_dev *eth_dev);
uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
+uint16_t qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
+ __rte_unused struct rte_mbuf **pkts,
+ __rte_unused uint16_t nb_pkts);
+
/* Fastpath resource alloc/dealloc helpers */
int qede_alloc_fp_resc(struct qede_dev *qdev);
diff --git a/drivers/net/ring/Makefile b/drivers/net/ring/Makefile
index ae835052..b7e1a378 100644
--- a/drivers/net/ring/Makefile
+++ b/drivers/net/ring/Makefile
@@ -53,9 +53,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_RING) += rte_eth_ring.c
#
SYMLINK-y-include += rte_eth_ring.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_eal lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_mbuf lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c
index c1767c48..87d22581 100644
--- a/drivers/net/ring/rte_eth_ring.c
+++ b/drivers/net/ring/rte_eth_ring.c
@@ -75,7 +75,6 @@ struct pmd_internals {
};
-static const char *drivername = "Rings PMD";
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -89,7 +88,7 @@ eth_ring_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_rx = (uint16_t)rte_ring_dequeue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SC_DEQ)
r->rx_pkts.cnt += nb_rx;
else
@@ -103,7 +102,7 @@ eth_ring_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
void **ptrs = (void *)&bufs[0];
struct ring_queue *r = q;
const uint16_t nb_tx = (uint16_t)rte_ring_enqueue_burst(r->rng,
- ptrs, nb_bufs);
+ ptrs, nb_bufs, NULL);
if (r->rng->flags & RING_F_SP_ENQ) {
r->tx_pkts.cnt += nb_tx;
r->err_pkts.cnt += nb_bufs - nb_tx;
@@ -173,13 +172,11 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = (uint16_t)internals->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)internals->max_tx_queues;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -227,12 +224,13 @@ eth_mac_addr_remove(struct rte_eth_dev *dev __rte_unused,
{
}
-static void
+static int
eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused,
struct ether_addr *mac_addr __rte_unused,
uint32_t index __rte_unused,
uint32_t vmdq __rte_unused)
{
+ return -ENOTSUP;
}
static void
@@ -259,6 +257,8 @@ static const struct eth_dev_ops ops = {
.mac_addr_add = eth_mac_addr_add,
};
+static struct rte_vdev_driver pmd_ring_drv;
+
static int
do_eth_dev_ring_create(const char *name,
struct rte_ring * const rx_queues[], const unsigned nb_rx_queues,
@@ -339,15 +339,12 @@ do_eth_dev_ring_create(const char *name,
data->mac_addrs = &internals->address;
eth_dev->data = data;
- eth_dev->driver = NULL;
eth_dev->dev_ops = &ops;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
data->kdrv = RTE_KDRV_NONE;
- data->drv_name = drivername;
+ data->drv_name = pmd_ring_drv.driver.name;
data->numa_node = numa_node;
- TAILQ_INIT(&(eth_dev->link_intr_cbs));
-
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_ring_rx;
eth_dev->tx_pkt_burst = eth_ring_tx;
@@ -505,12 +502,16 @@ out:
}
static int
-rte_pmd_ring_probe(const char *name, const char *params)
+rte_pmd_ring_probe(struct rte_vdev_device *dev)
{
+ const char *name, *params;
struct rte_kvargs *kvlist = NULL;
int ret = 0;
struct node_action_list *info = NULL;
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+
RTE_LOG(INFO, PMD, "Initializing pmd_ring for %s\n", name);
if (params == NULL || params[0] == '\0') {
@@ -580,8 +581,9 @@ out_free:
}
static int
-rte_pmd_ring_remove(const char *name)
+rte_pmd_ring_remove(struct rte_vdev_device *dev)
{
+ const char *name = rte_vdev_device_name(dev);
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals = NULL;
struct ring_queue *r = NULL;
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644
index 00000000..57aa963b
--- /dev/null
+++ b/drivers/net/sfc/Makefile
@@ -0,0 +1,143 @@
+#
+# BSD LICENSE
+#
+# Copyright (c) 2016-2017 Solarflare Communications Inc.
+# All rights reserved.
+#
+# This software was jointly developed between OKTET Labs (under contract
+# for Solarflare) and Solarflare Communications, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_sfc_efx.a
+
+CFLAGS += -I$(SRCDIR)/base/
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+# Strict-aliasing rules are violated by rte_eth_link to uint64_t casts
+CFLAGS += -Wno-strict-aliasing
+
+# Enable extra warnings
+CFLAGS += -Wextra
+
+# More warnings not enabled by above aggregators
+CFLAGS += -Wdisabled-optimization
+
+# Extra CFLAGS for base driver files
+CFLAGS_BASE_DRIVER += -Wno-sign-compare
+CFLAGS_BASE_DRIVER += -Wno-unused-parameter
+CFLAGS_BASE_DRIVER += -Wno-unused-variable
+
+# Compiler and version dependent flags
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wnested-externs
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS += -Waggregate-return
+CFLAGS += -Wbad-function-cast
+CFLAGS_BASE_DRIVER += -Wno-empty-body
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable
+endif
+
+#
+# List of base driver object files for which
+# special CFLAGS above should be applied
+#
+BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c)))
+$(foreach obj, $(BASE_DRIVER_OBJS), \
+ $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+EXPORT_MAP := rte_pmd_sfc_efx_version.map
+
+LIBABIVER := 1
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_kvargs.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
+
+VPATH += $(SRCDIR)/base
+
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_bootcfg.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_crc32.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_lic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_port.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += efx_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += mcdi_mon.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_sram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += siena_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_ev.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_filter.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_intr.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_mcdi.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_nvram.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_tx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += ef10_vpd.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += hunt_nic.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += medford_nic.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/sfc/base/README b/drivers/net/sfc/base/README
new file mode 100644
index 00000000..9019e8ba
--- /dev/null
+++ b/drivers/net/sfc/base/README
@@ -0,0 +1,36 @@
+
+ Copyright (c) 2006-2016 Solarflare Communications Inc.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Solarflare libefx driver library
+================================
+
+This directory contains the source code of the Solarflare Communications
+libefx driver library, version v4.10.0.1012.
+
+Updating
+========
+
+The source code in this directory should not be modified.
+Please contact the driver maintainers to request changes.
diff --git a/drivers/net/sfc/base/ef10_ev.c b/drivers/net/sfc/base/ef10_ev.c
new file mode 100644
index 00000000..35226749
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_ev.c
@@ -0,0 +1,1401 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_STATS
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+/*
+ * A non-interrupting event queue requires an interrupting event queue to
+ * refer to for wake-up events, even if wake-ups are never used.
+ * It may even be a non-allocated event queue.
+ */
+#define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_set_evq_tmr(
+ __in efx_nic_t *enp,
+ __in uint32_t instance,
+ __in uint32_t mode,
+ __in uint32_t timer_ns)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_EVQ_TMR_IN_LEN,
+ MC_CMD_SET_EVQ_TMR_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_EVQ_TMR;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
+ MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in boolean_t low_latency)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ boolean_t interrupting;
+ int ev_cut_through;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_IN_LEN(npages) > MC_CMD_INIT_EVQ_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /*
+	 * On Huntington, RX and TX event batching can only be requested
+	 * together (even if the datapath firmware doesn't actually support
+	 * RX batching). If event cut through is enabled, no RX batching occurs.
+ *
+ * So always enable RX and TX event batching, and enable event cut
+ * through if we want low latency operation.
+ */
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ ev_cut_through = low_latency ? 1 : 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ ev_cut_through = 0;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ ev_cut_through = 1;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_6(req, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_IN_FLAG_CUT_THRU, ev_cut_through,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
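+	/* Populate the DMA address table, one entry per event queue buffer */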
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_evq_v2(
+ __in efx_nic_t *enp,
+ __in unsigned int instance,
+ __in efsys_mem_t *esmp,
+ __in size_t nevs,
+ __in uint32_t irq,
+ __in uint32_t us,
+ __in uint32_t flags)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[
+ MAX(MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_EVQ_NBUFS(EFX_EVQ_MAXNEVS)),
+ MC_CMD_INIT_EVQ_V2_OUT_LEN)];
+ boolean_t interrupting;
+ unsigned int evq_type;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ npages = EFX_EVQ_NBUFS(nevs);
+ if (MC_CMD_INIT_EVQ_V2_IN_LEN(npages) > MC_CMD_INIT_EVQ_V2_IN_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq);
+
+ interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) {
+ case EFX_EVQ_FLAGS_TYPE_AUTO:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_THROUGHPUT:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT;
+ break;
+ case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY:
+ evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+ MCDI_IN_POPULATE_DWORD_4(req, INIT_EVQ_V2_IN_FLAGS,
+ INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting,
+ INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0,
+ INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0,
+ INIT_EVQ_V2_IN_FLAG_TYPE, evq_type);
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks);
+ }
+
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS);
+ MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail4;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail5;
+ }
+
+ /* NOTE: ignore the returned IRQ param as firmware does not set it. */
+
+ EFSYS_PROBE1(mcdi_evq_flags, uint32_t,
+ MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS));
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_evq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_EVQ_IN_LEN,
+ MC_CMD_FINI_EVQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_EVQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t irq;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = ef10_ev_rx;
+ eep->ee_tx = ef10_ev_tx;
+ eep->ee_driver = ef10_ev_driver;
+ eep->ee_drv_gen = ef10_ev_drv_gen;
+ eep->ee_mcdi = ef10_ev_mcdi;
+
+ /* Set up the event queue */
+ /* INIT_EVQ expects function-relative vector number */
+ if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
+ irq = index;
+ } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
+ irq = index;
+ flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ } else {
+ irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
+ }
+
+ /*
+ * Interrupts may be raised for events immediately after the queue is
+ * created. See bug58606.
+ */
+
+ if (encp->enc_init_evq_v2_supported) {
+ /*
+ * On Medford the low latency license is required to enable RX
+ * and event cut through and to disable RX batching. If event
+ * queue type in flags is auto, we let the firmware decide the
+ * settings to use. If the adapter has a low latency license,
+ * it will choose the best settings for low latency, otherwise
+ * it will choose the best settings for throughput.
+ */
+ rc = efx_mcdi_init_evq_v2(enp, index, esmp, n, irq, us, flags);
+ if (rc != 0)
+ goto fail4;
+ } else {
+ /*
+ * On Huntington we need to specify the settings to use.
+ * If event queue type in flags is auto, we favour throughput
+ * if the adapter is running virtualization supporting firmware
+ * (i.e. the full featured firmware variant)
+ * and latency otherwise. The Ethernet Virtual Bridging
+ * capability is used to make this decision. (Note though that
+ * the low latency firmware variant is also best for
+		 * throughput, and the corresponding type should be specified
+ * to choose it.)
+ */
+ boolean_t low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
+ rc = efx_mcdi_init_evq(enp, index, esmp, n, irq, us, flags,
+ low_latency);
+ if (rc != 0)
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) efx_mcdi_fini_evq(eep->ee_enp, eep->ee_index);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
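+	/*
+	 * With the bug35388 workaround the read pointer is wider than the
+	 * indirect register field, so it is written in two halves: the high
+	 * bits first, then the low bits.
+	 */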
+ if (enp->en_nic_cfg.enc_bug35388_workaround) {
+ EFX_STATIC_ASSERT(EFX_EVQ_MINNEVS >
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_STATIC_ASSERT(EFX_EVQ_MAXNEVS <
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
+ &dword, B_FALSE);
+ } else {
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+ }
+
+ return (0);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_driver_event(
+ __in efx_nic_t *enp,
+ __in uint32_t evq,
+ __in efx_qword_t data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRIVER_EVENT_IN_LEN,
+ MC_CMD_DRIVER_EVENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_DRIVER_EVENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
+
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
+ EFX_QWORD_FIELD(data, EFX_DWORD_0));
+ MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
+ EFX_QWORD_FIELD(data, EFX_DWORD_1));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
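+/*
+ * Post a software-generated event to the queue by asking the MC to
+ * deliver it via MC_CMD_DRIVER_EVENT.
+ */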
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_3(event,
+ ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
+ ESF_DZ_DRV_SUB_CODE, 0,
+ ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
+
+ (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
+}
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_dword_t dword;
+ uint32_t mode;
+ efx_rc_t rc;
+
+ /* Check that hardware and MCDI use the same timer MODE values */
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
+ EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
+ MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ mode = FFE_CZ_TIMER_MODE_DIS;
+ } else {
+ mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
+ }
+
+ if (encp->enc_bug61265_workaround) {
+ uint32_t ns = us * 1000;
+
+ rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
+ if (rc != 0)
+ goto fail2;
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail3;
+
+ if (encp->enc_bug35388_workaround) {
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, ticks);
+ EFX_BAR_TBL_WRITED(enp, ER_DD_EVQ_INDIRECT,
+ eep->ee_index, &dword, 0);
+ } else {
+ EFX_POPULATE_DWORD_2(dword,
+ ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, ticks);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_EVQ_TMR_REG,
+ eep->ee_index, &dword, 0);
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+static __checkReturn boolean_t
+ef10_ev_rx_packed_stream(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t label;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int pkt_count;
+ unsigned int current_id;
+ boolean_t new_buffer;
+
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
+
+ flags = 0;
+
+ eersp = &eep->ee_rxq_state[label];
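+	/*
+	 * The number of packets completed by this event is the modular
+	 * difference between the descriptor pointer low bits in the event
+	 * and the running packet count.
+	 */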
+ pkt_count = (EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS) + 1 +
+ next_read_lbits - eersp->eers_rx_stream_npackets) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_stream_npackets += pkt_count;
+
+ if (new_buffer) {
+ flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
+ if (eersp->eers_rx_packed_stream_credits <
+ EFX_RX_PACKED_STREAM_MAX_CREDITS)
+ eersp->eers_rx_packed_stream_credits++;
+ eersp->eers_rx_read_ptr++;
+ }
+ current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
+ goto deliver;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
+ should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
+ flags);
+
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn boolean_t
+ef10_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t size;
+ uint32_t label;
+ uint32_t mac_class;
+ uint32_t eth_tag_class;
+ uint32_t l3_class;
+ uint32_t l4_class;
+ uint32_t next_read_lbits;
+ uint16_t flags;
+ boolean_t cont;
+ boolean_t should_abort;
+ efx_evq_rxq_state_t *eersp;
+ unsigned int desc_count;
+ unsigned int last_used_id;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ /* Basic packet information */
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
+ eersp = &eep->ee_rxq_state[label];
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+ * Packed stream events are very different,
+ * so handle them separately
+ */
+ if (eersp->eers_rx_packed_stream)
+ return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
+#endif
+
+ size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
+ next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
+ eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
+ mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
+ l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
+ l4_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L4_CLASS);
+ cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+ flags = 0;
+
+ if (cont != 0) {
+ /*
+ * This may be part of a scattered frame, or it may be a
+ * truncated frame if scatter is disabled on this RXQ.
+ * Overlength frames can be received if e.g. a VF is configured
+ * for 1500 MTU but connected to a port set to 9000 MTU
+ * (see bug56567).
+ * FIXME: There is not yet any driver that supports scatter on
+ * Huntington. Scatter support is required for OSX.
+ */
+ flags |= EFX_PKT_CONT;
+ }
+
+ if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
+ flags |= EFX_PKT_UNICAST;
+
+ /* Increment the count of descriptors read */
+ desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ eersp->eers_rx_read_ptr += desc_count;
+
+ /*
+ * FIXME: add error checking to make sure this a batched event.
+ * This could also be an aborted scatter, see Bug36629.
+ */
+ if (desc_count > 1) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
+ flags |= EFX_PKT_PREFIX_LEN;
+ }
+
+ /* Calculate the index of the last descriptor consumed */
+ last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
+
+ /* Check for errors that invalidate checksum and L3/L4 fields */
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECC_ERR) != 0) {
+ /* RX frame truncated (error flag is misnamed) */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
+ /* Bad Ethernet frame CRC */
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ flags |= EFX_DISCARD;
+ goto deliver;
+ }
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
+ /*
+ * Hardware parse failed, due to malformed headers
+ * or headers that are too long for the parser.
+ * Headers and checksums must be validated by the host.
+ */
+ /* TODO: EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE); */
+ goto deliver;
+ }
+
+ if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
+ (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ switch (l3_class) {
+ case ESE_DZ_L3_CLASS_IP4:
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ flags |= EFX_PKT_IPV4;
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_IPV4;
+ }
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ }
+ break;
+
+ case ESE_DZ_L3_CLASS_IP6:
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ flags |= EFX_PKT_IPV6;
+
+ if (l4_class == ESE_DZ_L4_CLASS_TCP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_TCP;
+ } else if (l4_class == ESE_DZ_L4_CLASS_UDP) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_UDP;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ }
+ break;
+
+ default:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ break;
+ }
+
+ if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ } else {
+ flags |= EFX_CKSUM_TCPUDP;
+ }
+ }
+
+deliver:
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ /* Discard events after RXQ/TXQ errors */
+ if (enp->en_reset_flags & (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR))
+ return (B_FALSE);
+
+ if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
+ /* Drop this event */
+ return (B_FALSE);
+ }
+
+ /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
+ label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ unsigned int code;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
+ switch (code) {
+ case ESE_DZ_DRV_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+ break;
+ }
+
+ case ESE_DZ_DRV_START_UP_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+ break;
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+ should_abort = B_FALSE;
+
+ data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+ef10_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ case MCDI_EVENT_CODE_PROXY_RESPONSE:
+ /*
+ * This event notifies a function that an authorization request
+ * has been processed. If the request was authorized then the
+ * function can now re-send the original MCDI request.
+ * See SF-113652-SW "SR-IOV Proxied Network Access Control".
+ */
+ efx_mcdi_ev_proxy_response(enp,
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
+ MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
+ break;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ ef10_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ /* Decode monitor stat for MCDI sensor (if supported) */
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
+ /* Report monitor stat change */
+ should_abort = eecp->eec_monitor(arg, id, value);
+ } else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else {
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+ }
+#endif
+ break;
+ }
+
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+		/* Falcon/Siena only (should not be seen with Huntington). */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MC_REBOOT:
+ /* MC_REBOOT event is used for Huntington (EF10) and later. */
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_ERR: {
+ /*
+ * After a TXQ error is detected, firmware sends a TX_ERR event.
+ * This may be followed by TX completions (which we discard),
+ * and then finally by a TX_FLUSH event. Firmware destroys the
+ * TXQ automatically after sending the TX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
+
+ EFSYS_PROBE2(tx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
+ MCDI_EV_FIELD(eqp, TX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_TX_FLUSH: {
+ uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
+
+ /*
+ * EF10 firmware sends two TX_FLUSH events: one to the txq's
+ * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with TX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_ERR: {
+ /*
+ * After an RXQ error is detected, firmware sends an RX_ERR
+ * event. This may be followed by RX events (which we discard),
+ * and then finally by an RX_FLUSH event. Firmware destroys the
+ * RXQ automatically after sending the RX_FLUSH event.
+ */
+ enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
+
+ EFSYS_PROBE2(rx_descq_err,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ /* Inform the driver that a reset is required. */
+ eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
+ MCDI_EV_FIELD(eqp, RX_ERR_DATA));
+ break;
+ }
+
+ case MCDI_EVENT_CODE_RX_FLUSH: {
+ uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
+
+ /*
+ * EF10 firmware sends two RX_FLUSH events: one to the rxq's
+ * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
+ * We want to wait for all completions, so ignore the events
+ * with RX_FLUSH_TO_DRIVER.
+ */
+ if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
+ should_abort = B_FALSE;
+ break;
+ }
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ break;
+ }
+
+ default:
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ break;
+ }
+
+ return (should_abort);
+}
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in boolean_t packed_stream)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ /*
+	 * For packed stream modes, the very first event will have the
+	 * new buffer flag set, so the read pointer will be incremented,
+	 * yielding the correct pointer. That results in simpler code
+	 * than trying to detect a start-of-the-world condition in the
+	 * event handler.
+ */
+ eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
+#else
+ eersp->eers_rx_read_ptr = 0;
+#endif
+ eersp->eers_rx_mask = erp->er_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = packed_stream;
+ if (packed_stream) {
+ eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
+ (EFX_RX_PACKED_STREAM_MEM_PER_CREDIT /
+ EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
+ /*
+ * A single credit is allocated to the queue when it is started.
+		 * It is immediately spent by the first packet, which has the
+		 * NEW BUFFER flag set, but it must still be taken into account
+		 * so as not to accidentally wrap around the maximum number of
+		 * credits.
+ */
+ eersp->eers_rx_packed_stream_credits--;
+ EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
+ EFX_RX_PACKED_STREAM_MAX_CREDITS);
+ }
+#else
+ EFSYS_ASSERT(!packed_stream);
+#endif
+}
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label)
+{
+ efx_evq_rxq_state_t *eersp;
+
+ EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
+ eersp = &eep->ee_rxq_state[label];
+
+ EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
+
+ eersp->eers_rx_read_ptr = 0;
+ eersp->eers_rx_mask = 0;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ eersp->eers_rx_stream_npackets = 0;
+ eersp->eers_rx_packed_stream = B_FALSE;
+ eersp->eers_rx_packed_stream_credits = 0;
+#endif
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_filter.c b/drivers/net/sfc/base/ef10_filter.c
new file mode 100644
index 00000000..695bb847
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_filter.c
@@ -0,0 +1,1501 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_FILTER
+
+#define EFE_SPEC(eftp, index) ((eftp)->eft_entry[(index)].efe_spec)
+
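+/*
+ * Each filter table entry stores the filter spec pointer with the
+ * per-entry flags (busy, auto-old) encoded in its low bits, so the
+ * flags must be masked off before the pointer is used.
+ */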
+static efx_filter_spec_t *
+ef10_filter_entry_spec(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ return ((efx_filter_spec_t *)(EFE_SPEC(eftp, index) &
+ ~(uintptr_t)EFX_EF10_FILTER_FLAGS));
+}
+
+static boolean_t
+ef10_filter_entry_is_busy(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_BUSY)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static boolean_t
+ef10_filter_entry_is_auto_old(
+ __in const ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ if (EFE_SPEC(eftp, index) & EFX_EF10_FILTER_FLAG_AUTO_OLD)
+ return (B_TRUE);
+ else
+ return (B_FALSE);
+}
+
+static void
+ef10_filter_set_entry(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index,
+ __in_opt const efx_filter_spec_t *efsp)
+{
+ EFE_SPEC(eftp, index) = (uintptr_t)efsp;
+}
+
+static void
+ef10_filter_set_entry_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_not_busy(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_BUSY;
+}
+
+static void
+ef10_filter_set_entry_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+ EFE_SPEC(eftp, index) |= (uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+}
+
+static void
+ef10_filter_set_entry_not_auto_old(
+ __inout ef10_filter_table_t *eftp,
+ __in unsigned int index)
+{
+ EFE_SPEC(eftp, index) &= ~(uintptr_t)EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ EFSYS_ASSERT(ef10_filter_entry_spec(eftp, index) != NULL);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#define MATCH_MASK(match) (EFX_MASK32(match) << EFX_LOW_BIT(match))
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_HOST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_IP));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_REM_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_MAC ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_MAC));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_LOC_PORT ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_DST_PORT));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_ETHER_TYPE ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_INNER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_OUTER_VID ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_IP_PROTO ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO));
+ EFX_STATIC_ASSERT(EFX_FILTER_MATCH_UNKNOWN_MCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST));
+ EFX_STATIC_ASSERT((uint32_t)EFX_FILTER_MATCH_UNKNOWN_UCAST_DST ==
+ MATCH_MASK(MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST));
+#undef MATCH_MASK
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (ef10_filter_table_t), eftp);
+
+ if (!eftp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_ef10_filter_table = eftp;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_filter.ef_ef10_filter_table != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (ef10_filter_table_t),
+ enp->en_filter.ef_ef10_filter_table);
+ }
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_add(
+ __in efx_nic_t *enp,
+ __in efx_filter_spec_t *spec,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REPLACE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO,
+ handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI,
+ handle->efh_hi);
+ /* Fall through */
+ case MC_CMD_FILTER_OP_IN_OP_INSERT:
+ case MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP, filter_op);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_MATCH_FIELDS,
+ spec->efs_match_flags);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_DEST,
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_QUEUE,
+ spec->efs_dmaq_id);
+ if (spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) {
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_CONTEXT,
+ spec->efs_rss_context);
+ }
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_RX_MODE,
+ spec->efs_flags & EFX_FILTER_FLAG_RX_RSS ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+
+ if (filter_op != MC_CMD_FILTER_OP_IN_OP_REPLACE) {
+ /*
+ * NOTE: Unlike most MCDI requests, the filter fields
+ * are presented in network (big endian) byte order.
+ */
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_MAC),
+ spec->efs_rem_mac, EFX_MAC_ADDR_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_MAC),
+ spec->efs_loc_mac, EFX_MAC_ADDR_LEN);
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_SRC_PORT,
+ __CPU_TO_BE_16(spec->efs_rem_port));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_DST_PORT,
+ __CPU_TO_BE_16(spec->efs_loc_port));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_ETHER_TYPE,
+ __CPU_TO_BE_16(spec->efs_ether_type));
+
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_INNER_VLAN,
+ __CPU_TO_BE_16(spec->efs_inner_vid));
+ MCDI_IN_SET_WORD(req, FILTER_OP_IN_OUTER_VLAN,
+ __CPU_TO_BE_16(spec->efs_outer_vid));
+
+ /* IP protocol (in low byte, high byte is zero) */
+ MCDI_IN_SET_BYTE(req, FILTER_OP_IN_IP_PROTO,
+ spec->efs_ip_proto);
+
+ EFX_STATIC_ASSERT(sizeof (spec->efs_rem_host) ==
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ EFX_STATIC_ASSERT(sizeof (spec->efs_loc_host) ==
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_SRC_IP),
+ &spec->efs_rem_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_SRC_IP_LEN);
+ memcpy(MCDI_IN2(req, uint8_t, FILTER_OP_IN_DST_IP),
+ &spec->efs_loc_host.eo_byte[0],
+ MC_CMD_FILTER_OP_IN_DST_IP_LEN);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ handle->efh_lo = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_LO);
+ handle->efh_hi = MCDI_OUT_DWORD(req, FILTER_OP_OUT_HANDLE_HI);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_filter_op_delete(
+ __in efx_nic_t *enp,
+ __in unsigned int filter_op,
+ __inout ef10_filter_handle_t *handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FILTER_OP_IN_LEN,
+ MC_CMD_FILTER_OP_OUT_LEN)];
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FILTER_OP;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FILTER_OP_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FILTER_OP_OUT_LEN;
+
+ switch (filter_op) {
+ case MC_CMD_FILTER_OP_IN_OP_REMOVE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ break;
+ case MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE:
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_LO, handle->efh_lo);
+ MCDI_IN_SET_DWORD(req, FILTER_OP_IN_HANDLE_HI, handle->efh_hi);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FILTER_OP_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+ef10_filter_equal(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ /* FIXME: Consider rx vs tx filters (look at efs_flags) */
+ if (left->efs_match_flags != right->efs_match_flags)
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_rem_host, right->efs_rem_host))
+ return (B_FALSE);
+ if (!EFX_OWORD_IS_EQUAL(left->efs_loc_host, right->efs_loc_host))
+ return (B_FALSE);
+ if (memcmp(left->efs_rem_mac, right->efs_rem_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (memcmp(left->efs_loc_mac, right->efs_loc_mac, EFX_MAC_ADDR_LEN))
+ return (B_FALSE);
+ if (left->efs_rem_port != right->efs_rem_port)
+ return (B_FALSE);
+ if (left->efs_loc_port != right->efs_loc_port)
+ return (B_FALSE);
+ if (left->efs_inner_vid != right->efs_inner_vid)
+ return (B_FALSE);
+ if (left->efs_outer_vid != right->efs_outer_vid)
+ return (B_FALSE);
+ if (left->efs_ether_type != right->efs_ether_type)
+ return (B_FALSE);
+ if (left->efs_ip_proto != right->efs_ip_proto)
+ return (B_FALSE);
+
+ return (B_TRUE);
+
+}
+
+static __checkReturn boolean_t
+ef10_filter_same_dest(
+ __in const efx_filter_spec_t *left,
+ __in const efx_filter_spec_t *right)
+{
+ if ((left->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ (right->efs_flags & EFX_FILTER_FLAG_RX_RSS)) {
+ if (left->efs_rss_context == right->efs_rss_context)
+ return (B_TRUE);
+ } else if ((~(left->efs_flags) & EFX_FILTER_FLAG_RX_RSS) &&
+ (~(right->efs_flags) & EFX_FILTER_FLAG_RX_RSS)) {
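+		/* Neither filter has RX_RSS set, so compare the destination DMA queue */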
+ if (left->efs_dmaq_id == right->efs_dmaq_id)
+ return (B_TRUE);
+ }
+ return (B_FALSE);
+}
+
+static __checkReturn uint32_t
+ef10_filter_hash(
+ __in efx_filter_spec_t *spec)
+{
+ EFX_STATIC_ASSERT((sizeof (efx_filter_spec_t) % sizeof (uint32_t))
+ == 0);
+ EFX_STATIC_ASSERT((EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid) %
+ sizeof (uint32_t)) == 0);
+
+ /*
+	 * As the area of the efx_filter_spec_t we need to hash is DWORD
+	 * aligned and an exact number of DWORDs in size, we can use the
+	 * optimised efx_hash_dwords() rather than efx_hash_bytes().
+ */
+ return (efx_hash_dwords((const uint32_t *)&spec->efs_outer_vid,
+ (sizeof (efx_filter_spec_t) -
+ EFX_FIELD_OFFSET(efx_filter_spec_t, efs_outer_vid)) /
+ sizeof (uint32_t), 0));
+}
+
+/*
+ * Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static __checkReturn boolean_t
+ef10_filter_is_exclusive(
+ __in efx_filter_spec_t *spec)
+{
+ if ((spec->efs_match_flags & EFX_FILTER_MATCH_LOC_MAC) &&
+ !EFX_MAC_ADDR_IS_MULTICAST(spec->efs_loc_mac))
+ return (B_TRUE);
+
+ if ((spec->efs_match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV4) &&
+ ((spec->efs_loc_host.eo_u8[0] & 0xf) != 0xe))
+ return (B_TRUE);
+ if ((spec->efs_ether_type == EFX_ETHER_TYPE_IPV6) &&
+ (spec->efs_loc_host.eo_u8[0] != 0xff))
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp)
+{
+ int tbl_id;
+ efx_filter_spec_t *spec;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ boolean_t restoring;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ for (tbl_id = 0; tbl_id < EFX_EF10_FILTER_TBL_ROWS; tbl_id++) {
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ spec = ef10_filter_entry_spec(eftp, tbl_id);
+ if (spec == NULL) {
+ restoring = B_FALSE;
+ } else if (ef10_filter_entry_is_busy(eftp, tbl_id)) {
+ /* Ignore busy entries. */
+ restoring = B_FALSE;
+ } else {
+ ef10_filter_set_entry_busy(eftp, tbl_id);
+ restoring = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (restoring == B_FALSE)
+ continue;
+
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[tbl_id].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail1;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ ef10_filter_set_entry_not_busy(eftp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * An arbitrary search limit for the software hash table, as used by the
+ * Linux net driver.
+ */
+#define EF10_FILTER_SEARCH_LIMIT 200
+
+static __checkReturn efx_rc_t
+ef10_filter_add_internal(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace,
+ __out_opt uint32_t *filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ uint32_t hash;
+ unsigned int depth;
+ int ins_index;
+ boolean_t replacing = B_FALSE;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ hash = ef10_filter_hash(spec);
+
+ /*
+ * FIXME: Add support for inserting filters of different priorities
+ * and removing lower priority multicast filters (bug 42378)
+ */
+
+ /*
+ * Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ ins_index = -1;
+ depth = 1;
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ for (;;) {
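+			/* Linear probe of the table, starting one slot past the hash bucket */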
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(eftp, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0) {
+ ins_index = i;
+ }
+ } else if (ef10_filter_equal(spec, saved_spec)) {
+ if (ef10_filter_entry_is_busy(eftp, i))
+ break;
+ if (saved_spec->efs_priority
+ == EFX_FILTER_PRI_AUTO) {
+ ins_index = i;
+ goto found;
+ } else if (ef10_filter_is_exclusive(spec)) {
+ if (may_replace) {
+ ins_index = i;
+ goto found;
+ } else {
+ rc = EEXIST;
+ goto fail1;
+ }
+ }
+
+ /* Leave existing */
+ }
+
+ /*
+ * Once we reach the maximum search depth, use
+ * the first suitable slot or return EBUSY if
+ * there was none.
+ */
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+ goto found;
+ }
+ depth++;
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+ }
+
+found:
+ /*
+ * Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = ef10_filter_entry_spec(eftp, ins_index);
+ if (saved_spec) {
+ if (saved_spec->efs_priority == EFX_FILTER_PRI_AUTO) {
+ /* This is a filter we are refreshing */
+ ef10_filter_set_entry_not_auto_old(eftp, ins_index);
+ goto out_unlock;
+
+ }
+ replacing = B_TRUE;
+ } else {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*spec), saved_spec);
+ if (!saved_spec) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+ *saved_spec = *spec;
+ ef10_filter_set_entry(eftp, ins_index, saved_spec);
+ }
+ ef10_filter_set_entry_busy(eftp, ins_index);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ /*
+	 * When replacing a filter, the handle may change after a successful
+	 * replace operation.
+ */
+ if (replacing) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_INSERT,
+ &eftp->eft_entry[ins_index].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_add(enp, spec,
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE,
+ &eftp->eft_entry[ins_index].efe_handle);
+ }
+
+ if (rc != 0)
+ goto fail4;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->efs_priority = spec->efs_priority;
+ saved_spec->efs_flags = spec->efs_flags;
+ saved_spec->efs_rss_context = spec->efs_rss_context;
+ saved_spec->efs_dmaq_id = spec->efs_dmaq_id;
+ }
+
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+
+out_unlock:
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ if (filter_id)
+ *filter_id = ins_index;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ if (!replacing) {
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), saved_spec);
+ saved_spec = NULL;
+ }
+ ef10_filter_set_entry_not_busy(eftp, ins_index);
+ ef10_filter_set_entry(eftp, ins_index, NULL);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+
+ rc = ef10_filter_add_internal(enp, spec, may_replace, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_delete_internal(
+ __in efx_nic_t *enp,
+ __in uint32_t filter_id)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *spec;
+ efsys_lock_state_t state;
+ uint32_t filter_idx = filter_id % EFX_EF10_FILTER_TBL_ROWS;
+
+ /*
+ * Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ *
+ * FIXME: What if the busy flag is never cleared?
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ while (ef10_filter_entry_is_busy(table, filter_idx)) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_SPIN(1);
+ EFSYS_LOCK(enp->en_eslp, state);
+ }
+ if ((spec = ef10_filter_entry_spec(table, filter_idx)) != NULL) {
+ ef10_filter_set_entry_busy(table, filter_idx);
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (spec == NULL) {
+ rc = ENOENT;
+ goto fail1;
+ }
+
+ /*
+ * Try to remove the hardware filter. This may fail if the MC has
+ * rebooted (which frees all hardware filter resources).
+ */
+ if (ef10_filter_is_exclusive(spec)) {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE,
+ &table->eft_entry[filter_idx].efe_handle);
+ } else {
+ rc = efx_mcdi_filter_op_delete(enp,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE,
+ &table->eft_entry[filter_idx].efe_handle);
+ }
+
+ /* Free the software table entry */
+ EFSYS_LOCK(enp->en_eslp, state);
+ ef10_filter_set_entry_not_busy(table, filter_idx);
+ ef10_filter_set_entry(table, filter_idx, NULL);
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (*spec), spec);
+
+ /* Check result of hardware filter removal */
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t *saved_spec;
+ unsigned int hash;
+ unsigned int depth;
+ unsigned int i;
+ efsys_lock_state_t state;
+ boolean_t locked = B_FALSE;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ hash = ef10_filter_hash(spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+ locked = B_TRUE;
+
+ depth = 1;
+ for (;;) {
+ i = (hash + depth) & (EFX_EF10_FILTER_TBL_ROWS - 1);
+ saved_spec = ef10_filter_entry_spec(table, i);
+ if (saved_spec && ef10_filter_equal(spec, saved_spec) &&
+ ef10_filter_same_dest(spec, saved_spec)) {
+ break;
+ }
+ if (depth == EF10_FILTER_SEARCH_LIMIT) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ depth++;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ locked = B_FALSE;
+
+ rc = ef10_filter_delete_internal(enp, i);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ if (locked)
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_parser_disp_info(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PARSER_DISP_INFO_IN_LEN,
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX)];
+ size_t matches_count;
+ size_t list_size;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PARSER_DISP_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PARSER_DISP_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, GET_PARSER_DISP_INFO_OUT_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ matches_count = MCDI_OUT_DWORD(req,
+ GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES);
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(matches_count)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *list_lengthp = matches_count;
+
+ if (buffer_length < matches_count) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ /*
+ * Check that the elements in the list in the MCDI response are the size
+ * we expect, so we can just copy them directly. Any conversion of the
+ * flags is handled by the caller.
+ */
+ EFX_STATIC_ASSERT(sizeof (uint32_t) ==
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN);
+
+ list_size = matches_count *
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN;
+ memcpy(buffer,
+ MCDI_OUT2(req, uint32_t,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES),
+ list_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+
+ size_t mcdi_list_length;
+ size_t list_length;
+ uint32_t i;
+ efx_rc_t rc;
+ uint32_t all_filter_flags =
+ (EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_LOC_HOST |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_REM_PORT |
+ EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_INNER_VID |
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST |
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST);
+
+ rc = efx_mcdi_get_parser_disp_info(enp, buffer, buffer_length,
+ &mcdi_list_length);
+ if (rc != 0) {
+ if (rc == ENOSPC) {
+ /* Pass through mcdi_list_length for the list length */
+ *list_lengthp = mcdi_list_length;
+ }
+ goto fail1;
+ }
+
+ /*
+ * The static assertions in ef10_filter_init() ensure that the values of
+ * the EFX_FILTER_MATCH flags match those used by MCDI, so they don't
+ * need to be converted.
+ *
+ * In case support is added to MCDI for additional flags, remove any
+ * matches from the list which include flags we don't support. The order
+ * of the matches is preserved as they are ordered from highest to
+ * lowest priority.
+ */
+ EFSYS_ASSERT(mcdi_list_length <= buffer_length);
+ list_length = 0;
+ for (i = 0; i < mcdi_list_length; i++) {
+ if ((buffer[i] & ~all_filter_flags) == 0) {
+ buffer[list_length] = buffer[i];
+ list_length++;
+ }
+ }
+
+ *list_lengthp = list_length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_unicast(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *addr,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the filter for the local station address */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC, addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_unicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown unicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_uc_def(&spec);
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_unicst_filter_indexes[eftp->eft_unicst_filter_count]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_unicst_filter_count++;
+ EFSYS_ASSERT(eftp->eft_unicst_filter_count <=
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_multicast_list(
+ __in efx_nic_t *enp,
+ __in boolean_t mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count,
+ __in efx_filter_flags_t filter_flags,
+ __in boolean_t rollback)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ uint8_t addr[6];
+ uint32_t i;
+ uint32_t filter_index;
+ uint32_t filter_count;
+ efx_rc_t rc;
+
+ if (mulcst == B_FALSE)
+ count = 0;
+
+ if (count + (brdcst ? 1 : 0) >
+ EFX_ARRAY_SIZE(eftp->eft_mulcst_filter_indexes)) {
+ /* Too many MAC addresses */
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Insert/renew multicast address list filters */
+ filter_count = 0;
+ for (i = 0; i < count; i++) {
+ efx_filter_spec_init_rx(&spec,
+ EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ efx_filter_spec_set_eth_local(&spec,
+ EFX_FILTER_SPEC_VID_UNSPEC,
+ &addrs[i * EFX_MAC_ADDR_LEN]);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+			/* Only stop upon failure if told to roll back */
+ goto rollback;
+ }
+
+ }
+
+ if (brdcst == B_TRUE) {
+ /* Insert/renew broadcast address filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+
+ EFX_MAC_BROADCAST_ADDR_SET(addr);
+ efx_filter_spec_set_eth_local(&spec, EFX_FILTER_SPEC_VID_UNSPEC,
+ addr);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &filter_index);
+
+ if (rc == 0) {
+ eftp->eft_mulcst_filter_indexes[filter_count] =
+ filter_index;
+ filter_count++;
+ } else if (rollback == B_TRUE) {
+			/* Only stop upon failure if told to roll back */
+ goto rollback;
+ }
+ }
+
+ eftp->eft_mulcst_filter_count = filter_count;
+ eftp->eft_using_all_mulcst = B_FALSE;
+
+ return (0);
+
+rollback:
+ /* Remove any filters we have inserted */
+ i = filter_count;
+ while (i--) {
+ (void) ef10_filter_delete_internal(enp,
+ eftp->eft_mulcst_filter_indexes[i]);
+ }
+ eftp->eft_mulcst_filter_count = 0;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+ef10_filter_insert_all_multicast(
+ __in efx_nic_t *enp,
+ __in efx_filter_flags_t filter_flags)
+{
+ ef10_filter_table_t *eftp = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_spec_t spec;
+ efx_rc_t rc;
+
+ /* Insert the unknown multicast filter */
+ efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_AUTO,
+ filter_flags,
+ eftp->eft_default_rxq);
+ efx_filter_spec_set_mc_def(&spec);
+
+ rc = ef10_filter_add_internal(enp, &spec, B_TRUE,
+ &eftp->eft_mulcst_filter_indexes[0]);
+ if (rc != 0)
+ goto fail1;
+
+ eftp->eft_mulcst_filter_count = 1;
+ eftp->eft_using_all_mulcst = B_TRUE;
+
+ /*
+ * FIXME: If brdcst == B_FALSE, add a filter to drop broadcast traffic.
+ */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_filter_remove_old(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ uint32_t i;
+
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ (void) ef10_filter_delete_internal(enp, i);
+ }
+ }
+}
+
+
+static __checkReturn efx_rc_t
+ef10_filter_get_workarounds(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ uint32_t implemented = 0;
+ uint32_t enabled = 0;
+ efx_rc_t rc;
+
+ rc = efx_mcdi_get_workarounds(enp, &implemented, &enabled);
+ if (rc == 0) {
+ /* Check if chained multicast filter support is enabled */
+ if (implemented & enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807)
+ encp->enc_bug26807_workaround = B_TRUE;
+ else
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if (rc == ENOTSUP) {
+ /*
+		 * Firmware is too old to support GET_WORKAROUNDS; support for
+		 * this workaround was only added in later firmware.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Reconfigure all filters.
+ * If the all_unicst and/or all_mulcst filters cannot be applied then
+ * return ENOTSUP (note that the filters for the specified addresses are
+ * still applied in this case).
+ */
+ __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+ efx_filter_flags_t filter_flags;
+ unsigned int i;
+ efx_rc_t all_unicst_rc = 0;
+ efx_rc_t all_mulcst_rc = 0;
+ efx_rc_t rc;
+
+ if (table->eft_default_rxq == NULL) {
+ /*
+ * Filters direct traffic to the default RXQ, and so cannot be
+ * inserted until it is available. Any currently configured
+ * filters must be removed (ignore errors in case the MC
+ * has rebooted, which removes hardware filters).
+ */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ table->eft_unicst_filter_count = 0;
+
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ (void) ef10_filter_delete_internal(enp,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+ table->eft_mulcst_filter_count = 0;
+
+ return (0);
+ }
+
+ if (table->eft_using_rss)
+ filter_flags = EFX_FILTER_FLAG_RX_RSS;
+ else
+ filter_flags = 0;
+
+ /* Mark old filters which may need to be removed */
+ for (i = 0; i < table->eft_unicst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_unicst_filter_indexes[i]);
+ }
+ for (i = 0; i < table->eft_mulcst_filter_count; i++) {
+ ef10_filter_set_entry_auto_old(table,
+ table->eft_mulcst_filter_indexes[i]);
+ }
+
+ /*
+ * Insert or renew unicast filters.
+ *
+	 * Firmware does not perform chaining on unicast filters. As traffic is
+	 * therefore only delivered to the first matching filter, we should
+	 * always insert the specific filter for our MAC address, to try to
+	 * ensure we get that traffic.
+ *
+ * (If the filter for our MAC address has already been inserted by
+ * another function, we won't receive traffic sent to us, even if we
+ * insert a unicast mismatch filter. To prevent traffic stealing, this
+ * therefore relies on the privilege model only allowing functions to
+ * insert filters for their own MAC address unless explicitly given
+ * additional privileges by the user. This also means that, even on a
+	 * privileged function, inserting a unicast mismatch filter may not
+ * catch all traffic in multi PCI function scenarios.)
+ */
+ table->eft_unicst_filter_count = 0;
+ rc = ef10_filter_insert_unicast(enp, mac_addr, filter_flags);
+ if (all_unicst || (rc != 0)) {
+ all_unicst_rc = ef10_filter_insert_all_unicast(enp,
+ filter_flags);
+ if ((rc != 0) && (all_unicst_rc != 0))
+ goto fail1;
+ }
+
+ /*
+ * WORKAROUND_BUG26807 controls firmware support for chained multicast
+ * filters, and can only be enabled or disabled when the hardware filter
+ * table is empty.
+ *
+ * Chained multicast filters require support from the datapath firmware,
+ * and may not be available (e.g. low-latency variants or old Huntington
+ * firmware).
+ *
+ * Firmware will reset (FLR) functions which have inserted filters in
+ * the hardware filter table when the workaround is enabled/disabled.
+ * Functions without any hardware filters are not reset.
+ *
+ * Re-check if the workaround is enabled after adding unicast hardware
+ * filters. This ensures that encp->enc_bug26807_workaround matches the
+ * firmware state, and that later changes to enable/disable the
+ * workaround will result in this function seeing a reset (FLR).
+ *
+ * In common-code drivers, we only support multiple PCI function
+ * scenarios with firmware that supports multicast chaining, so we can
+ * assume it is enabled for such cases and hence simplify the filter
+ * insertion logic. Firmware that does not support multicast chaining
+ * does not support multiple PCI function configurations either, so
+ * filter insertion is much simpler and the same strategies can still be
+ * used.
+ */
+ if ((rc = ef10_filter_get_workarounds(enp)) != 0)
+ goto fail2;
+
+ if ((table->eft_using_all_mulcst != all_mulcst) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is enabled, so traffic that matches
+ * more than one multicast filter will be replicated and
+ * delivered to multiple recipients. To avoid this duplicate
+ * delivery, remove old multicast filters before inserting new
+ * multicast filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ /* Insert or renew multicast filters */
+ if (all_mulcst == B_TRUE) {
+ /*
+ * Insert the all multicast filter. If that fails, try to insert
+ * all of our multicast filters (but without rollback on
+ * failure).
+ */
+ all_mulcst_rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (all_mulcst_rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp, B_TRUE,
+ brdcst, addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail3;
+ }
+ } else {
+ /*
+ * Insert filters for multicast addresses.
+		 * If any insertion fails, then roll back and try to insert the
+ * all multicast filter instead.
+ * If that also fails, try to insert all of the multicast
+ * filters (but without rollback on failure).
+ */
+ rc = ef10_filter_insert_multicast_list(enp, mulcst, brdcst,
+ addrs, count, filter_flags, B_TRUE);
+ if (rc != 0) {
+ if ((table->eft_using_all_mulcst == B_FALSE) &&
+ (encp->enc_bug26807_workaround == B_TRUE)) {
+ /*
+ * Multicast filter chaining is on, so remove
+				 * old filters before inserting the all multicast
+				 * filter to avoid duplicate delivery caused
+ * by packets matching multiple filters.
+ */
+ ef10_filter_remove_old(enp);
+ }
+
+ rc = ef10_filter_insert_all_multicast(enp,
+ filter_flags);
+ if (rc != 0) {
+ rc = ef10_filter_insert_multicast_list(enp,
+ mulcst, brdcst,
+ addrs, count, filter_flags, B_FALSE);
+ if (rc != 0)
+ goto fail4;
+ }
+ }
+ }
+
+ /* Remove old filters which were not renewed */
+ ef10_filter_remove_old(enp);
+
+	/* Report if any optional flags were rejected */
+ if (((all_unicst != B_FALSE) && (all_unicst_rc != 0)) ||
+ ((all_mulcst != B_FALSE) && (all_mulcst_rc != 0))) {
+ rc = ENOTSUP;
+ }
+
+ return (rc);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Clear auto old flags */
+ for (i = 0; i < EFX_ARRAY_SIZE(table->eft_entry); i++) {
+ if (ef10_filter_entry_is_auto_old(table, i)) {
+ ef10_filter_set_entry_not_auto_old(table, i);
+ }
+ }
+
+ return (rc);
+}
+
+ void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ *erpp = table->eft_default_rxq;
+ *using_rss = table->eft_using_rss;
+}
+
+
+ void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+#if EFSYS_OPT_RX_SCALE
+ EFSYS_ASSERT((using_rss == B_FALSE) ||
+ (enp->en_rss_context != EF10_RSS_CONTEXT_INVALID));
+ table->eft_using_rss = using_rss;
+#else
+ EFSYS_ASSERT(using_rss == B_FALSE);
+ table->eft_using_rss = B_FALSE;
+#endif
+ table->eft_default_rxq = erp;
+}
+
+ void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ ef10_filter_table_t *table = enp->en_filter.ef_ef10_filter_table;
+
+ table->eft_default_rxq = NULL;
+ table->eft_using_rss = B_FALSE;
+}
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_impl.h b/drivers/net/sfc/base/ef10_impl.h
new file mode 100644
index 00000000..8c3dffee
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_impl.h
@@ -0,0 +1,1183 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EF10_IMPL_H
+#define _SYS_EF10_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if (EFSYS_OPT_HUNTINGTON && EFSYS_OPT_MEDFORD)
+#define EF10_MAX_PIOBUF_NBUFS MAX(HUNT_PIOBUF_NBUFS, MEDFORD_PIOBUF_NBUFS)
+#elif EFSYS_OPT_HUNTINGTON
+#define EF10_MAX_PIOBUF_NBUFS HUNT_PIOBUF_NBUFS
+#elif EFSYS_OPT_MEDFORD
+#define EF10_MAX_PIOBUF_NBUFS MEDFORD_PIOBUF_NBUFS
+#endif
+
+/*
+ * FIXME: This is just a power of 2 which fits in an MCDI v1 message, and could
+ * possibly be increased, or the write size reported by newer firmware used
+ * instead.
+ */
+#define EF10_NVRAM_CHUNK 0x80
+
+/*
+ * Alignment requirement for value written to RX WPTR: the WPTR must be
+ * aligned to an 8 descriptor boundary.
+ */
+#define EF10_RX_WPTR_ALIGN 8
+
+/*
+ * Max byte offset into the packet the TCP header must start for the hardware
+ * to be able to parse the packet correctly.
+ */
+#define EF10_TCP_HEADER_OFFSET_LIMIT 208
+
+/* Invalid RSS context handle */
+#define EF10_RSS_CONTEXT_INVALID (0xffffffff)
+
+
+/* EV */
+
+ __checkReturn efx_rc_t
+ef10_ev_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_ev_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+ void
+ef10_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+ __checkReturn efx_rc_t
+ef10_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+ void
+ef10_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+ __checkReturn efx_rc_t
+ef10_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+#endif /* EFSYS_OPT_QSTATS */
+
+ void
+ef10_ev_rxlabel_init(
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp,
+ __in unsigned int label,
+ __in boolean_t packed_stream);
+
+ void
+ef10_ev_rxlabel_fini(
+ __in efx_evq_t *eep,
+ __in unsigned int label);
+
+/* INTR */
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp);
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp);
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp);
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+ef10_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp);
+
+
+/* MAC */
+
+extern __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+
+/* MCDI */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp);
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size);
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end
+ );
+
+extern __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_NVRAM */
+
+
+/* PHY */
+
+typedef struct ef10_link_state_s {
+ uint32_t els_adv_cap_mask;
+ uint32_t els_lp_cap_mask;
+ unsigned int els_fcntl;
+ efx_link_mode_t els_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t els_loopback;
+#endif
+ boolean_t els_mac_up;
+} ef10_link_state_t;
+
+extern void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+/* TX */
+
+extern __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+ef10_tx_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+extern void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+extern void
+ef10_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+extern void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
+
+extern void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t vlan_tci,
+ __out efx_desc_t *edp);
+
+
+#if EFSYS_OPT_QSTATS
+
+extern void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+typedef uint32_t efx_piobuf_handle_t;
+
+#define EFX_PIOBUF_HANDLE_INVALID ((efx_piobuf_handle_t) -1)
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle);
+
+extern __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index);
+
+
+/* VPD */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+ef10_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+extern __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#if EFSYS_OPT_RX_SCALE
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+extern void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+extern void
+ef10_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_FILTER
+
+typedef struct ef10_filter_handle_s {
+ uint32_t efh_lo;
+ uint32_t efh_hi;
+} ef10_filter_handle_t;
+
+typedef struct ef10_filter_entry_s {
+ uintptr_t efe_spec; /* pointer to filter spec plus busy bit */
+ ef10_filter_handle_t efe_handle;
+} ef10_filter_entry_t;
+
+/*
+ * BUSY flag indicates that an update is in progress.
+ * AUTO_OLD flag is used to mark and sweep MAC packet filters.
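+ * The flag bits are stored in the low bits of efe_spec; this relies on
+ * the filter spec allocation being at least 4-byte aligned.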
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1U
+#define EFX_EF10_FILTER_FLAG_AUTO_OLD 2U
+#define EFX_EF10_FILTER_FLAGS 3U
+
+/*
+ * Size of the hash table used by the driver. Doesn't need to be the
+ * same size as the hardware's table.
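+ * Must be a power of two, as probe indices are masked with
+ * (EFX_EF10_FILTER_TBL_ROWS - 1).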
+ */
+#define EFX_EF10_FILTER_TBL_ROWS 8192
+
+/* Only need to allow for one directed and one unknown unicast filter */
+#define EFX_EF10_FILTER_UNICAST_FILTERS_MAX 2
+
+/* Allow for the broadcast address to be added to the multicast list */
+#define EFX_EF10_FILTER_MULTICAST_FILTERS_MAX (EFX_MAC_MULTICAST_LIST_MAX + 1)
+
+typedef struct ef10_filter_table_s {
+ ef10_filter_entry_t eft_entry[EFX_EF10_FILTER_TBL_ROWS];
+ efx_rxq_t *eft_default_rxq;
+ boolean_t eft_using_rss;
+ uint32_t eft_unicst_filter_indexes[
+ EFX_EF10_FILTER_UNICAST_FILTERS_MAX];
+ uint32_t eft_unicst_filter_count;
+ uint32_t eft_mulcst_filter_indexes[
+ EFX_EF10_FILTER_MULTICAST_FILTERS_MAX];
+ uint32_t eft_mulcst_filter_count;
+ boolean_t eft_using_all_mulcst;
+} ef10_filter_table_t;
+
+ __checkReturn efx_rc_t
+ef10_filter_init(
+ __in efx_nic_t *enp);
+
+ void
+ef10_filter_fini(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_restore(
+ __in efx_nic_t *enp);
+
+ __checkReturn efx_rc_t
+ef10_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+ __checkReturn efx_rc_t
+ef10_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+ef10_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern __checkReturn efx_rc_t
+ef10_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+extern void
+ef10_filter_get_default_rxq(
+ __in efx_nic_t *enp,
+ __out efx_rxq_t **erpp,
+ __out boolean_t *using_rss);
+
+extern void
+ef10_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+ef10_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_FILTER */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep);
+
+extern __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp);
+
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp);
+
+extern __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/* Data space per credit in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MEM_PER_CREDIT (1 << 16)
+
+/*
+ * Received packets are always aligned at this boundary. There is also
+ * always a gap of this size between packets.
+ * (see SF-112241-TC, 4.5)
+ */
+#define EFX_RX_PACKED_STREAM_ALIGNMENT 64
+
+/*
+ * Size of a pseudo-header prepended to received packets
+ * in packed stream mode
+ */
+#define EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE 8
+
+/* Minimum space for packet in packed stream mode */
+#define EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE \
+ P2ROUNDUP(EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE + \
+ EFX_MAC_PDU_MIN + \
+ EFX_RX_PACKED_STREAM_ALIGNMENT, \
+ EFX_RX_PACKED_STREAM_ALIGNMENT)
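+/*
+ * For example, assuming EFX_MAC_PDU_MIN is 60 bytes, this evaluates to
+ * P2ROUNDUP(8 + 60 + 64, 64) = 192 bytes per packet.
+ */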
+
+/* Maximum number of credits */
+#define EFX_RX_PACKED_STREAM_MAX_CREDITS 127
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EF10_IMPL_H */
diff --git a/drivers/net/sfc/base/ef10_intr.c b/drivers/net/sfc/base/ef10_intr.c
new file mode 100644
index 00000000..16be3d8c
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_intr.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ _NOTE(ARGUNUSED(enp, type, esmp))
+ return (0);
+}
+
+
+ void
+ef10_intr_enable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+ void
+ef10_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_trigger_interrupt(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_TRIGGER_INTERRUPT_IN_LEN,
+ MC_CMD_TRIGGER_INTERRUPT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (level >= enp->en_nic_cfg.enc_intr_limit) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_TRIGGER_INTERRUPT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_TRIGGER_INTERRUPT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_TRIGGER_INTERRUPT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_bug41750_workaround) {
+ /*
+ * bug 41750: Test interrupts don't work on Greenport
+ * bug 50084: Test interrupts don't work on VFs
+ */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_trigger_interrupt(enp, level)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_dword_t dword;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read the queue mask and implicitly acknowledge the interrupt. */
+ EFX_BAR_READD(enp, ER_DZ_BIU_INT_ISR_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ _NOTE(ARGUNUSED(enp, message))
+
+ /* EF10 fatal errors are reported via events */
+ *fatalp = B_FALSE;
+}
+
+ void
+ef10_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ /* EF10 fatal errors are reported via events */
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+ef10_intr_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_mac.c b/drivers/net/sfc/base/ef10_mac.c
new file mode 100644
index 00000000..488633f5
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_mac.c
@@ -0,0 +1,897 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+ __checkReturn efx_rc_t
+ef10_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_fcntl = els.els_fcntl;
+
+ *link_modep = els.els_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ ef10_link_state_t els;
+ efx_rc_t rc;
+
+ /*
+ * Because EF10 doesn't *require* polling, we can't rely on
+ * ef10_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail1;
+
+ *mac_upp = els.els_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * EF10 adapters use MC_CMD_VADAPTOR_SET_MAC to set the
+ * MAC address; the address field in MC_CMD_SET_MAC has no
+ * effect.
+ * MC_CMD_VADAPTOR_SET_MAC requires mac-spoofing privilege and
+ * the port to have no filters or queues active.
+ */
+static __checkReturn efx_rc_t
+efx_mcdi_vadapter_set_mac(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_SET_MAC_IN_LEN,
+ MC_CMD_VADAPTOR_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
+ enp->en_vport_id);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, VADAPTOR_SET_MAC_IN_MACADDR),
+ epp->ep_mac_addr);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_addr_set(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_vadapter_set_mac(enp)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /*
+ * Fallback for older Huntington firmware without Vadapter
+ * support.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ /* Only configure the MTU in this call to MC_CMD_SET_MAC */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_MTU, mtu);
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_EXT_IN_CONTROL,
+ SET_MAC_EXT_IN_CFG_MTU, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_mtu_get(
+ __in efx_nic_t *enp,
+ __out size_t *mtu)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_EXT_IN_LEN,
+ MC_CMD_SET_MAC_V2_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_V2_OUT_LEN;
+
+ /*
+ * With MC_CMD_SET_MAC_EXT_IN_CONTROL set to 0, this just queries the
+ * MTU. This should always be supported on Medford, but it is not
+ * supported on older Huntington firmware.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_EXT_IN_CONTROL, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ if (req.emr_out_length_used < MC_CMD_SET_MAC_V2_OUT_MTU_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *mtu = MCDI_OUT_DWORD(req, SET_MAC_V2_OUT_MTU);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ if (encp->enc_enhanced_set_mac_supported) {
+ if ((rc = efx_mcdi_mtu_set(enp, epp->ep_mac_pdu)) != 0)
+ goto fail1;
+ } else {
+ /*
+ * Fallback for older Huntington firmware, which always
+ * configures all of the parameters to MC_CMD_SET_MAC. This isn't
+ * suitable for setting the MTU on unprivileged functions.
+ */
+ if ((rc = ef10_mac_reconfigure(enp)) != 0)
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mtu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+__checkReturn efx_rc_t
+ef10_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+
+ /*
+ * Note: The Huntington MAC does not support REJECT_BRDCST.
+ * The REJECT_UNCST flag will also prevent multicast traffic
+ * from reaching the filters. As Huntington filters drop any
+ * traffic that does not match a filter it is ok to leave the
+ * MAC running in promiscuous mode. See bug41141.
+ *
+ * FIXME: Does REJECT_UNCST behave the same way on Medford?
+ */
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, 0,
+ SET_MAC_IN_REJECT_BRDCST, 0);
+
+ /*
+ * Flow control, whether it is auto-negotiated or not,
+ * is set via the PHY advertised capabilities. When set to
+ * automatic the MAC will use the PHY settings to determine
+ * the flow control settings.
+ */
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, MC_CMD_FCNTL_AUTO);
+
+ /* Do not include the Ethernet frame checksum in RX packets */
+ MCDI_IN_POPULATE_DWORD_1(req, SET_MAC_IN_FLAGS,
+ SET_MAC_IN_FLAG_INCLUDE_FCS, 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /*
+ * Unprivileged functions cannot control link state,
+ * but still need to configure filters.
+ */
+ if (req.emr_rc != EACCES) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ /*
+ * Apply the filters for the MAC configuration.
+ * If the NIC isn't ready to accept filters this may
+ * return success without setting anything.
+ */
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rxq_t *old_rxq;
+ boolean_t old_using_rss;
+ efx_rc_t rc;
+
+ ef10_filter_get_default_rxq(enp, &old_rxq, &old_using_rss);
+
+ ef10_filter_default_rxq_set(enp, erp, using_rss);
+
+ rc = efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ ef10_filter_default_rxq_set(enp, old_rxq, old_using_rss);
+
+ return (rc);
+}
+
+ void
+ef10_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ ef10_filter_default_rxq_clear(enp);
+
+ efx_filter_reconfigure(enp, epp->ep_mac_addr,
+ epp->ep_all_unicst, epp->ep_mulcst,
+ epp->ep_all_mulcst, epp->ep_brdcst,
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count);
+}
+
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+ef10_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on EF10 */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range ef10_common[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_RX_DROP_EVENTS },
+ { EFX_MAC_RX_JABBER_PKTS, EFX_MAC_RX_JABBER_PKTS },
+ { EFX_MAC_RX_NODESC_DROP_CNT, EFX_MAC_TX_PAUSE_PKTS },
+ };
+ const struct efx_mac_stats_range ef10_tx_size_bins[] = {
+ { EFX_MAC_TX_LE_64_PKTS, EFX_MAC_TX_GE_15XX_PKTS },
+ };
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_common, EFX_ARRAY_SIZE(ef10_common))) != 0)
+ goto fail1;
+
+ if (epp->ep_phy_cap_mask & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
+ const struct efx_mac_stats_range ef10_40g_extra[] = {
+ { EFX_MAC_RX_ALIGN_ERRORS, EFX_MAC_RX_ALIGN_ERRORS },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_40g_extra, EFX_ARRAY_SIZE(ef10_40g_extra))) != 0)
+ goto fail2;
+
+ if (encp->enc_mac_stats_40g_tx_size_bins) {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp,
+ mask_size, ef10_tx_size_bins,
+ EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail3;
+ }
+ } else {
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_tx_size_bins, EFX_ARRAY_SIZE(ef10_tx_size_bins))) != 0)
+ goto fail4;
+ }
+
+ if (encp->enc_pm_and_rxdp_counters) {
+ const struct efx_mac_stats_range ef10_pm_and_rxdp[] = {
+ { EFX_MAC_PM_TRUNC_BB_OVERFLOW, EFX_MAC_RXDP_HLB_WAIT },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_pm_and_rxdp, EFX_ARRAY_SIZE(ef10_pm_and_rxdp))) != 0)
+ goto fail5;
+ }
+
+ if (encp->enc_datapath_cap_evb) {
+ const struct efx_mac_stats_range ef10_vadaptor[] = {
+ { EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_OVERFLOW },
+ };
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ ef10_vadaptor, EFX_ARRAY_SIZE(ef10_vadaptor))) != 0)
+ goto fail6;
+ }
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
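The ranges above are inclusive spans of EFX_MAC_* statistic ids. A hypothetical sketch of what adding one range to the mask amounts to (the real efx_mac_stats_mask_add_ranges() helper is assumed to behave roughly like this, one bit per statistic id):

static void
example_stats_mask_set_range(uint32_t *maskp, unsigned int first,
	unsigned int last)
{
	unsigned int id;

	/* Mark every statistic id in [first, last] as requested */
	for (id = first; id <= last; id++)
		maskp[id / 32] |= 1u << (id % 32);
}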
+
+#define EF10_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+
+ __checkReturn efx_rc_t
+ef10_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ /* Packet memory (EF10 only) */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_BB_OVERFLOW]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_VFIFO_FULL, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_VFIFO_FULL]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_TRUNC_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_TRUNC_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_QBB, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_QBB]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_PM_DISCARD_MAPPING, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_PM_DISCARD_MAPPING]), &value);
+
+ /* RX datapath */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_Q_DISABLED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_Q_DISABLED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_DI_DROPPED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_DI_DROPPED_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_STREAMING_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_STREAMING_PKTS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_FETCH]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RXDP_HLB_WAIT]), &value);
+
+
+ /* VADAPTER RX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_RX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_RX_OVERFLOW]), &value);
+
+ /* VADAPTER TX */
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_UNICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES]),
+ &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_PACKETS]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_BAD_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_BAD_BYTES]), &value);
+
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_VADAPTER_TX_OVERFLOW, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_VADAPTER_TX_OVERFLOW]), &value);
+
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ EF10_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+ /* Not a good enough check? */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
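Because the generation check above returns EAGAIN when the MC rewrote the DMA buffer mid-read, a caller is expected to retry. A minimal, hypothetical sketch (enp and esmp are assumed to be an initialised NIC handle and its stats DMA buffer):

	efsys_stat_t stats[EFX_MAC_NSTATS];
	uint32_t generation;
	unsigned int attempt;
	efx_rc_t rc;

	for (attempt = 0; attempt < 10; attempt++) {
		rc = ef10_mac_stats_update(enp, esmp, stats, &generation);
		if (rc != EAGAIN)
			break;		/* either success or a real error */
	}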
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_mcdi.c b/drivers/net/sfc/base/ef10_mcdi.c
new file mode 100644
index 00000000..5a26bda2
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_mcdi.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_MCDI
+
+#ifndef WITH_MCDI_V2
+#error "WITH_MCDI_V2 required for EF10 MCDIv2 commands."
+#endif
+
+
+ __checkReturn efx_rc_t
+ef10_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(enp->en_features & EFX_FEATURE_MCDI_DMA);
+
+ /*
+ * All EF10 firmware supports MCDIv2 and MCDIv1.
+ * Medford BootROM supports MCDIv2 and MCDIv1.
+ * Huntington BootROM supports MCDIv1 only.
+ */
+ emip->emi_max_version = 2;
+
+ /* A host DMA buffer is required for EF10 MCDI */
+ if (esmp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ensure that the MC doorbell is in a known state before issuing MCDI
+ * commands. The recovery algorithm requires that the MC command buffer
+ * be 256-byte aligned. See bug24769.
+ */
+ if ((EFSYS_MEM_ADDR(esmp) & 0xFF) != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 1);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+
+ /* Save initial MC reboot status */
+ (void) ef10_mcdi_poll_reboot(enp);
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ efx_mcdi_new_epoch(enp);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+
+ emip->emi_new_epoch = B_FALSE;
+}
+
+/*
+ * In older firmware all commands are processed in a single thread, so a
+ * long-running command for one PCIe function can block processing for another
+ * function (see bug 61269).
+ *
+ * In newer firmware that supports multithreaded MCDI processing, we can extend
+ * the timeout for long-running requests which we know firmware may choose to
+ * process in a background thread.
+ */
+#define EF10_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+#define EF10_MCDI_CMD_LONG_TIMEOUT_US (60 * 1000 * 1000)
+
+ void
+ef10_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ switch (emrp->emr_cmd) {
+ case MC_CMD_POLL_BIST:
+ case MC_CMD_NVRAM_ERASE:
+ case MC_CMD_LICENSING_V3:
+ case MC_CMD_NVRAM_UPDATE_FINISH:
+ if (encp->enc_fw_verified_nvram_update_required != B_FALSE) {
+ /*
+ * Potentially longer running commands, which firmware
+ * may choose to process in a background thread.
+ */
+ *timeoutp = EF10_MCDI_CMD_LONG_TIMEOUT_US;
+ break;
+ }
+ /* FALLTHRU */
+ default:
+ *timeoutp = EF10_MCDI_CMD_TIMEOUT_US;
+ break;
+ }
+}
+
+ void
+ef10_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t dword;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Write the header */
+ for (pos = 0; pos < hdr_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)hdrp + pos);
+ EFSYS_MEM_WRITED(esmp, pos, &dword);
+ }
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFSYS_MEM_WRITED(esmp, hdr_len + pos, &dword);
+ }
+
+ /* Guarantee ordering of memory (MCDI request) and PIO (MC doorbell) */
+ EFSYS_DMA_SYNC_FOR_DEVICE(esmp, 0, hdr_len + sdu_len);
+ EFSYS_PIO_WRITE_BARRIER();
+
+ /* Ring the doorbell to post the command DMA address to the MC */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_LWRD_REG, &dword, B_FALSE);
+
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ EFX_BAR_WRITED(enp, ER_DZ_MC_DB_HWRD_REG, &dword, B_FALSE);
+}
+
+ __checkReturn boolean_t
+ef10_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ efx_dword_t hdr;
+
+ EFSYS_MEM_READD(esmp, 0, &hdr);
+ EFSYS_MEM_READ_BARRIER();
+
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+ef10_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efsys_mem_t *esmp = emtp->emt_dma_mem;
+ unsigned int pos;
+ efx_dword_t data;
+
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFSYS_MEM_READD(esmp, offset + pos, &data);
+ memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ efx_rc_t
+ef10_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ uint32_t old_status;
+ uint32_t new_status;
+ efx_rc_t rc;
+
+ old_status = emip->emi_mc_reboot_status;
+
+ /* Update MC reboot status word */
+ EFX_BAR_TBL_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, 0, &dword, B_FALSE);
+ new_status = dword.ed_u32[0];
+
+ /* MC has rebooted if the value has changed */
+ if (new_status != old_status) {
+ emip->emi_mc_reboot_status = new_status;
+
+ /*
+ * FIXME: Ignore detected MC REBOOT for now.
+ *
+ * The Siena support for checking for MC reboot from status
+ * flags is broken - see comments in siena_mcdi_poll_reboot().
+ * As the generic MCDI code is shared the EF10 reboot
+ * detection suffers similar problems.
+ *
+ * Do not report an error when the boot status changes until
+ * this can be handled by common code drivers (and reworked to
+ * support Siena too).
+ */
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = EIO;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t privilege_mask = encp->enc_privilege_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Use privilege mask state at MCDI attach.
+ */
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ /*
+ * Admin privilege must be used prior to the introduction of
+ * a more specific flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ /*
+ * Admin privilege was used prior to the introduction of the
+ * specific LINK privilege flag.
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, LINK) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ /*
+ * Admin privilege must be used prior to the introduction of the
+ * MAC spoofing privilege (at v4.6), which is used up to the
+ * introduction of the CHANGE_MAC privilege (at v4.7).
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, CHANGE_MAC) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ /*
+ * Admin privilege must be used prior to the introduction of the
+ * MAC spoofing privilege (at v4.6), which is used up to the
+ * introduction of the MAC spoofing TX privilege (at v4.7).
+ */
+ *supportedp =
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING_TX) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, MAC_SPOOFING) ||
+ EFX_MCDI_HAVE_PRIVILEGE(privilege_mask, ADMIN);
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
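For instance, a caller gating a MAC address change on the privilege mapping above might look like the following sketch (illustrative only; enp is assumed to be an initialised NIC handle):

	boolean_t supported = B_FALSE;

	if (ef10_mcdi_feature_supported(enp, EFX_MCDI_FEATURE_MACADDR_CHANGE,
	    &supported) == 0 && supported) {
		/* This function holds a privilege that allows MAC changes */
	}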
+
+#endif /* EFSYS_OPT_MCDI */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_nic.c b/drivers/net/sfc/base/ef10_nic.c
new file mode 100644
index 00000000..aac2679c
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_nic.c
@@ -0,0 +1,1780 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_assignment(
+ __in efx_nic_t *enp,
+ __out uint32_t *portp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
+ MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_port_modes(
+ __in efx_nic_t *enp,
+ __out uint32_t *modesp,
+ __out_opt uint32_t *current_modep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PORT_MODES_IN_LEN,
+ MC_CMD_GET_PORT_MODES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PORT_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /*
+ * Require only Modes and DefaultMode fields, unless the current mode
+ * was requested (CurrentMode field was added for Medford).
+ */
+ if (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ if ((current_modep != NULL) && (req.emr_out_length_used <
+ MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);
+
+ if (current_modep != NULL) {
+ *current_modep = MCDI_OUT_DWORD(req,
+ GET_PORT_MODES_OUT_CURRENT_MODE);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_port_mode_bandwidth(
+ __in uint32_t port_mode,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ switch (port_mode) {
+ case TLV_PORT_MODE_10G:
+ bandwidth = 10000;
+ break;
+ case TLV_PORT_MODE_10G_10G:
+ bandwidth = 10000 * 2;
+ break;
+ case TLV_PORT_MODE_10G_10G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:
+ case TLV_PORT_MODE_10G_10G_10G_10G_Q2:
+ bandwidth = 10000 * 4;
+ break;
+ case TLV_PORT_MODE_40G:
+ bandwidth = 40000;
+ break;
+ case TLV_PORT_MODE_40G_40G:
+ bandwidth = 40000 * 2;
+ break;
+ case TLV_PORT_MODE_40G_10G_10G:
+ case TLV_PORT_MODE_10G_10G_40G:
+ bandwidth = 40000 + (10000 * 2);
+ break;
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
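A quick usage sketch of the mapping above (the expected value simply follows from the case arms, e.g. one 40G link plus two 10G links):

	uint32_t bandwidth_mbps;

	if (ef10_nic_get_port_mode_bandwidth(TLV_PORT_MODE_40G_10G_10G,
	    &bandwidth_mbps) == 0)
		EFSYS_ASSERT3U(bandwidth_mbps, ==, 40000 + (10000 * 2));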
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_alloc(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_ALLOC_IN_LEN,
+ MC_CMD_VADAPTOR_ALLOC_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_vport_id, ==, EVB_PORT_ID_NULL);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
+ VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
+ enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_vadaptor_free(
+ __in efx_nic_t *enp,
+ __in uint32_t port_id)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VADAPTOR_FREE_IN_LEN,
+ MC_CMD_VADAPTOR_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VADAPTOR_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_pf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_mac_address_vf(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ addrp = MCDI_OUT2(req, uint8_t,
+ VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_clock(
+ __in efx_nic_t *enp,
+ __out uint32_t *sys_freqp,
+ __out uint32_t *dpcpu_freqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CLOCK_IN_LEN,
+ MC_CMD_GET_CLOCK_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CLOCK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
+ if (*sys_freqp == 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
+ if (*dpcpu_freqp == 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_vector_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *vec_basep,
+ __out_opt uint32_t *pf_nvecp,
+ __out_opt uint32_t *vf_nvecp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_VECTOR_CFG_IN_LEN,
+ MC_CMD_GET_VECTOR_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (vec_basep != NULL)
+ *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
+ if (pf_nvecp != NULL)
+ *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
+ if (vf_nvecp != NULL)
+ *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_vis(
+ __in efx_nic_t *enp,
+ __in uint32_t min_vi_count,
+ __in uint32_t max_vi_count,
+ __out uint32_t *vi_basep,
+ __out uint32_t *vi_countp,
+ __out uint32_t *vi_shiftp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_VIS_IN_LEN,
+ MC_CMD_ALLOC_VIS_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (vi_countp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_VIS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
+ MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
+ *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);
+
+ /* Report VI_SHIFT if available (always zero for Huntington) */
+ if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
+ *vi_shiftp = 0;
+ else
+ *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_vis(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_FREE_VIS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ /* Ignore EALREADY (no allocated VIs, so nothing to free) */
+ if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_alloc_piobuf(
+ __in efx_nic_t *enp,
+ __out efx_piobuf_handle_t *handlep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ALLOC_PIOBUF_IN_LEN,
+ MC_CMD_ALLOC_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (handlep == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_free_piobuf(
+ __in efx_nic_t *enp,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FREE_PIOBUF_IN_LEN,
+ MC_CMD_FREE_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FREE_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_link_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LINK_PIOBUF_IN_LEN,
+ MC_CMD_LINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
+ MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_unlink_piobuf(
+ __in efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_UNLINK_PIOBUF_IN_LEN,
+ MC_CMD_UNLINK_PIOBUF_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+ef10_nic_alloc_piobufs(
+ __in efx_nic_t *enp,
+ __in uint32_t max_piobuf_count)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(max_piobuf_count, <=,
+ EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));
+
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+
+ for (i = 0; i < max_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
+ goto fail1;
+
+ enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
+ enp->en_arch.ef10.ena_piobuf_count++;
+ }
+
+ return;
+
+fail1:
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+
+static void
+ef10_nic_free_piobufs(
+ __in efx_nic_t *enp)
+{
+ efx_piobuf_handle_t *handlep;
+ unsigned int i;
+
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];
+
+ efx_mcdi_free_piobuf(enp, *handlep);
+ *handlep = EFX_PIOBUF_HANDLE_INVALID;
+ }
+ enp->en_arch.ef10.ena_piobuf_count = 0;
+}
+
+/* Sub-allocate a block from a piobuf */
+ __checkReturn efx_rc_t
+ef10_nic_pio_alloc(
+ __inout efx_nic_t *enp,
+ __out uint32_t *bufnump,
+ __out efx_piobuf_handle_t *handlep,
+ __out uint32_t *blknump,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
+ uint32_t blk_per_buf;
+ uint32_t buf, blk;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+ EFSYS_ASSERT(bufnump);
+ EFSYS_ASSERT(handlep);
+ EFSYS_ASSERT(blknump);
+ EFSYS_ASSERT(offsetp);
+ EFSYS_ASSERT(sizep);
+
+ if ((edcp->edc_pio_alloc_size == 0) ||
+ (enp->en_arch.ef10.ena_piobuf_count == 0)) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+ blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;
+
+ for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
+ uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];
+
+ if (~(*map) == 0)
+ continue;
+
+ EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
+ for (blk = 0; blk < blk_per_buf; blk++) {
+ if ((*map & (1u << blk)) == 0) {
+ *map |= (1u << blk);
+ goto done;
+ }
+ }
+ }
+ rc = ENOMEM;
+ goto fail2;
+
+done:
+ *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
+ *bufnump = buf;
+ *blknump = blk;
+ *sizep = edcp->edc_pio_alloc_size;
+ *offsetp = blk * (*sizep);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
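+
+/*
+ * Illustrative sketch of the sub-allocator above (it is not used by the
+ * driver and the helper name is hypothetical): each ena_pio_alloc_map
+ * entry is a bitmap of blk_per_buf blocks, the lowest clear bit is
+ * claimed, and the block's byte offset within the piobuf is
+ * blknum * edc_pio_alloc_size.
+ */
+static	__checkReturn	efx_rc_t
+ef10_nic_pio_map_alloc_sketch(
+	__inout		uint32_t *map,
+	__in		uint32_t blk_per_buf,
+	__in		size_t alloc_size,
+	__out		uint32_t *offsetp)
+{
+	uint32_t blk;
+
+	for (blk = 0; blk < blk_per_buf; blk++) {
+		if ((*map & (1u << blk)) == 0) {
+			/* Claim the block and report its byte offset */
+			*map |= (1u << blk);
+			*offsetp = blk * (uint32_t)alloc_size;
+			return (0);
+		}
+	}
+	return (ENOMEM);	/* all blocks in this piobuf are in use */
+}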
+
+/* Free a piobuf sub-allocated block */
+ __checkReturn efx_rc_t
+ef10_nic_pio_free(
+ __inout efx_nic_t *enp,
+ __in uint32_t bufnum,
+ __in uint32_t blknum)
+{
+ uint32_t *map;
+ efx_rc_t rc;
+
+ if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
+ (blknum >= (8 * sizeof (*map)))) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
+ if ((*map & (1u << blknum)) == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+ *map &= ~(1u << blknum);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_link(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index,
+ __in efx_piobuf_handle_t handle)
+{
+ return (efx_mcdi_link_piobuf(enp, vi_index, handle));
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_pio_unlink(
+ __inout efx_nic_t *enp,
+ __in uint32_t vi_index)
+{
+ return (efx_mcdi_unlink_piobuf(enp, vi_index));
+}
+
+static __checkReturn efx_rc_t
+ef10_mcdi_get_pf_count(
+ __in efx_nic_t *enp,
+ __out uint32_t *pf_countp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PF_COUNT_IN_LEN,
+ MC_CMD_GET_PF_COUNT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PF_COUNT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pf_countp = *MCDI_OUT(req, uint8_t,
+ MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);
+
+ EFSYS_ASSERT(*pf_countp != 0);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_get_datapath_caps(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t flags;
+ uint32_t flags2;
+ uint32_t tso2nc;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_capabilities(enp, &flags, NULL, NULL,
+ &flags2, &tso2nc)) != 0)
+ goto fail1;
+
+ if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0)
+ goto fail1;
+
+#define CAP_FLAG(flags1, field) \
+ ((flags1) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+#define CAP_FLAG2(flags2, field) \
+ ((flags2) & (1 << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## field ## _LBN)))
+
+ /*
+ * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
+ * We only support the 14 byte prefix here.
+ */
+ if (CAP_FLAG(flags, RX_PREFIX_LEN_14) == 0) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ encp->enc_rx_prefix_size = 14;
+
+ /* Check if the firmware supports TSO */
+ encp->enc_fw_assisted_tso_enabled =
+ CAP_FLAG(flags, TX_TSO) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports FATSOv2 */
+ encp->enc_fw_assisted_tso_v2_enabled =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? B_TRUE : B_FALSE;
+
+ /* Get the number of TSO contexts (FATSOv2) */
+ encp->enc_fw_assisted_tso_v2_n_contexts =
+ CAP_FLAG2(flags2, TX_TSO_V2) ? tso2nc : 0;
+
+ /* Check if the firmware has vadapter/vport/vswitch support */
+ encp->enc_datapath_cap_evb =
+ CAP_FLAG(flags, EVB) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports VLAN insertion */
+ encp->enc_hw_tx_insert_vlan_enabled =
+ CAP_FLAG(flags, TX_VLAN_INSERTION) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports RX event batching */
+ encp->enc_rx_batching_enabled =
+ CAP_FLAG(flags, RX_BATCHING) ? B_TRUE : B_FALSE;
+
+ /*
+ * Even if batching isn't reported as supported, we may still get
+ * batched events (see bug61153).
+ */
+ encp->enc_rx_batch_max = 16;
+
+ /* Check if the firmware supports disabling scatter on RXQs */
+ encp->enc_rx_disable_scatter_supported =
+ CAP_FLAG(flags, RX_DISABLE_SCATTER) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports packed stream mode */
+ encp->enc_rx_packed_stream_supported =
+ CAP_FLAG(flags, RX_PACKED_STREAM) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the firmware supports configurable buffer sizes
+ * for packed stream mode (otherwise buffer size is 1Mbyte)
+ */
+ encp->enc_rx_var_packed_stream_supported =
+ CAP_FLAG(flags, RX_PACKED_STREAM_VAR_BUFFERS) ? B_TRUE : B_FALSE;
+
+ /* Check if the firmware supports set mac with running filters */
+ encp->enc_allow_set_mac_with_installed_filters =
+ CAP_FLAG(flags, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
+ * specifying which parameters to configure.
+ */
+ encp->enc_enhanced_set_mac_supported =
+ CAP_FLAG(flags, SET_MAC_ENHANCED) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows
+ * us to let the firmware choose the settings to use on an EVQ.
+ */
+ encp->enc_init_evq_v2_supported =
+ CAP_FLAG2(flags2, INIT_EVQ_V2) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware-verified NVRAM updates must be used.
+ *
+ * The firmware trusted installer requires all NVRAM updates to use
+ * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
+ * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
+ * partition and report the result).
+ */
+ encp->enc_fw_verified_nvram_update_required =
+ CAP_FLAG2(flags2, NVRAM_UPDATE_REPORT_VERIFY_RESULT) ?
+ B_TRUE : B_FALSE;
+
+ /*
+ * Check if firmware provides packet memory and Rx datapath
+ * counters.
+ */
+ encp->enc_pm_and_rxdp_counters =
+ CAP_FLAG(flags, PM_AND_RXDP_COUNTERS) ? B_TRUE : B_FALSE;
+
+ /*
+ * Check if the 40G MAC hardware is capable of reporting
+ * statistics for Tx size bins.
+ */
+ encp->enc_mac_stats_40g_tx_size_bins =
+ CAP_FLAG2(flags2, MAC_STATS_40G_TX_SIZE_BINS) ? B_TRUE : B_FALSE;
+
+#undef CAP_FLAG
+#undef CAP_FLAG2
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#define EF10_LEGACY_PF_PRIVILEGE_MASK \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \
+ MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)
+
+#define EF10_LEGACY_VF_PRIVILEGE_MASK 0
+
+
+ __checkReturn efx_rc_t
+ef10_get_privilege_mask(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t mask;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
+ &mask)) != 0) {
+ if (rc != ENOTSUP)
+ goto fail1;
+
+ /* Fallback for old firmware without privilege mask support */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ /* Assume PF has admin privilege */
+ mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
+ } else {
+ /* VF is always unprivileged by default */
+ mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
+ }
+ }
+
+ *maskp = mask;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Table of mapping schemes from port number to the number of the external
+ * connector on the board. The external numbering does not distinguish
+ * off-board separated outputs such as from multi-headed cables.
+ *
+ * The count of adjacent port numbers that map to each external port,
+ * and the offset in the numbering, are determined by the chip family
+ * and the current port mode.
+ *
+ * For the Huntington family, the current port mode cannot be discovered,
+ * so the mapping used is instead the last match in the table to the full
+ * set of port modes to which the NIC can be configured. Therefore the
+ * ordering of entries in the mapping table is significant.
+ */
+static struct {
+ efx_family_t family;
+ uint32_t modes_mask;
+ int32_t count;
+ int32_t offset;
+} __ef10_external_port_mappings[] = {
+ /* Supported modes with 1 output per external port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G),
+ 1,
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G) |
+ (1 << TLV_PORT_MODE_10G_10G),
+ 1,
+ 1
+ },
+ /* Supported modes with 2 outputs per external port */
+ {
+ EFX_FAMILY_HUNTINGTON,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G),
+ 2,
+ 1
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_40G) |
+ (1 << TLV_PORT_MODE_40G_40G) |
+ (1 << TLV_PORT_MODE_40G_10G_10G) |
+ (1 << TLV_PORT_MODE_10G_10G_40G) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),
+ 2,
+ 1
+ },
+ /* Supported modes with 4 outputs per external port */
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q) |
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q1),
+ 4,
+ 1,
+ },
+ {
+ EFX_FAMILY_MEDFORD,
+ (1 << TLV_PORT_MODE_10G_10G_10G_10G_Q2),
+ 4,
+ 2
+ },
+};
+
+ __checkReturn efx_rc_t
+ef10_external_port_mapping(
+ __in efx_nic_t *enp,
+ __in uint32_t port,
+ __out uint8_t *external_portp)
+{
+ efx_rc_t rc;
+ int i;
+ uint32_t port_modes;
+ uint32_t matches;
+ uint32_t current;
+ int32_t count = 1; /* Default 1-1 mapping */
+ int32_t offset = 1; /* Default starting external port number */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current)) != 0) {
+ /*
+ * No current port mode information
+ * - infer mapping from available modes
+ */
+ if ((rc = efx_mcdi_get_port_modes(enp,
+ &port_modes, NULL)) != 0) {
+ /*
+ * No port mode information available
+ * - use default mapping
+ */
+ goto out;
+ }
+ } else {
+ /* Only need to scan the current mode */
+ port_modes = 1 << current;
+ }
+
+ /*
+ * Infer the internal port -> external port mapping from
+ * the possible port modes for this NIC.
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) {
+ if (__ef10_external_port_mappings[i].family !=
+ enp->en_family)
+ continue;
+ matches = (__ef10_external_port_mappings[i].modes_mask &
+ port_modes);
+ if (matches != 0) {
+ count = __ef10_external_port_mappings[i].count;
+ offset = __ef10_external_port_mappings[i].offset;
+ port_modes &= ~matches;
+ }
+ }
+
+ if (port_modes != 0) {
+ /* Some advertised modes are not supported */
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+out:
+ /*
+ * Scale as required by last matched mode and then convert to
+ * correctly offset numbering
+ */
+ *external_portp = (uint8_t)((port / count) + offset);
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
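+
+/*
+ * Worked example of the mapping above (the port mode is hypothetical): a
+ * Medford NIC restricted to TLV_PORT_MODE_10G_10G_10G_10G_Q2 matches the
+ * last table entry, giving count = 4 and offset = 2, so internal ports
+ * 0..3 all map to external port (port / 4) + 2 = 2.
+ */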
+
+
+ __checkReturn efx_rc_t
+ef10_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ if (rc != EACCES)
+ goto fail2;
+
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail3;
+
+ if ((rc = enop->eno_board_cfg(enp)) != 0)
+ if (rc != EACCES)
+ goto fail4;
+
+ /*
+ * Set default driver config limits (based on board config).
+ *
+ * FIXME: For now allocate a fixed number of VIs which is likely to be
+ * sufficient and small enough to allow multiple functions on the same
+ * port.
+ */
+ edcp->edc_min_vi_count = edcp->edc_max_vi_count =
+ MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));
+
+ /* The client driver must configure and enable PIO buffer support */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail5;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail6;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
+ /* Unprivileged functions do not have access to sensors */
+ if (rc != EACCES)
+ goto fail7;
+ }
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail7:
+ EFSYS_PROBE(fail7);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail6:
+ EFSYS_PROBE(fail6);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail5:
+ EFSYS_PROBE(fail5);
+#endif
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_evq_count, max_evq_count;
+ uint32_t min_rxq_count, max_rxq_count;
+ uint32_t min_txq_count, max_txq_count;
+ efx_rc_t rc;
+
+ if (edlp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get minimum required and maximum usable VI limits */
+ min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
+ min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
+ min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_min_vi_count =
+ MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));
+
+ max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
+ max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
+ max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);
+
+ edcp->edc_max_vi_count =
+ MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));
+
+ /*
+ * Check limits for sub-allocated piobuf blocks.
+ * PIO is optional, so don't fail if the limits are incorrect.
+ */
+ if ((encp->enc_piobuf_size == 0) ||
+ (encp->enc_piobuf_limit == 0) ||
+ (edlp->edl_min_pio_alloc_size == 0) ||
+ (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
+ /* Disable PIO */
+ edcp->edc_max_piobuf_count = 0;
+ edcp->edc_pio_alloc_size = 0;
+ } else {
+ uint32_t blk_size, blk_count, blks_per_piobuf;
+
+ blk_size =
+ MAX(edlp->edl_min_pio_alloc_size,
+ encp->enc_piobuf_min_alloc_size);
+
+ blks_per_piobuf = encp->enc_piobuf_size / blk_size;
+ EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);
+
+ blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);
+
+ /* A zero max pio alloc count means unlimited */
+ if ((edlp->edl_max_pio_alloc_count > 0) &&
+ (edlp->edl_max_pio_alloc_count < blk_count)) {
+ blk_count = edlp->edl_max_pio_alloc_count;
+ }
+
+ edcp->edc_pio_alloc_size = blk_size;
+ edcp->edc_max_piobuf_count =
+ (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
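+
+/*
+ * Worked example of the sizing above (all values hypothetical): with
+ * enc_piobuf_size = 16KB and blk_size = 2KB, blks_per_piobuf is 8. An
+ * enc_piobuf_limit of 4 gives blk_count = 32; if the driver caps this
+ * with edl_max_pio_alloc_count = 20, then edc_max_piobuf_count rounds up
+ * to (20 + 7) / 8 = 3 piobufs, each sub-allocated in 2KB blocks.
+ */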
+
+
+ __checkReturn efx_rc_t
+ef10_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_ENTITY_RESET_IN_LEN,
+ MC_CMD_ENTITY_RESET_OUT_LEN)];
+ efx_rc_t rc;
+
+ /* ef10_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG,
+ ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ /* Clear RX/TX DMA queue errors */
+ enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
+ uint32_t min_vi_count, max_vi_count;
+ uint32_t vi_count, vi_base, vi_shift;
+ uint32_t i;
+ uint32_t retry;
+ uint32_t delay_us;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ /* Allocate (optional) on-chip PIO buffers */
+ ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);
+
+ /*
+ * For best performance, PIO writes should use a write-combined
+ * (WC) memory mapping. Using a separate WC mapping for the PIO
+ * aperture of each VI would be a burden to drivers (and not
+ * possible if the host page size is >4Kbyte).
+ *
+ * To avoid this we use a single uncached (UC) mapping for VI
+ * register access, and a single WC mapping for extra VIs used
+ * for PIO writes.
+ *
+ * Each piobuf must be linked to a VI in the WC mapping, and to
+ * each VI that is using a sub-allocated block from the piobuf.
+ */
+ min_vi_count = edcp->edc_min_vi_count;
+ max_vi_count =
+ edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Ensure that the previously attached driver's VIs are freed */
+ if ((rc = efx_mcdi_free_vis(enp)) != 0)
+ goto fail2;
+
+ /*
+ * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
+ * fails then retrying the request for fewer VI resources may succeed.
+ */
+ vi_count = 0;
+ if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
+ &vi_base, &vi_count, &vi_shift)) != 0)
+ goto fail3;
+
+ EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);
+
+ if (vi_count < min_vi_count) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+
+ enp->en_arch.ef10.ena_vi_base = vi_base;
+ enp->en_arch.ef10.ena_vi_count = vi_count;
+ enp->en_arch.ef10.ena_vi_shift = vi_shift;
+
+ if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
+ /* Not enough extra VIs to map piobufs */
+ ef10_nic_free_piobufs(enp);
+ }
+
+ enp->en_arch.ef10.ena_pio_write_vi_base =
+ vi_count - enp->en_arch.ef10.ena_piobuf_count;
+
+ /* Save UC memory mapping details */
+ enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_pio_write_vi_base);
+ } else {
+ enp->en_arch.ef10.ena_uc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_vi_count);
+ }
+
+ /* Save WC memory mapping details */
+ enp->en_arch.ef10.ena_wc_mem_map_offset =
+ enp->en_arch.ef10.ena_uc_mem_map_offset +
+ enp->en_arch.ef10.ena_uc_mem_map_size;
+
+ enp->en_arch.ef10.ena_wc_mem_map_size =
+ (ER_DZ_TX_PIOBUF_STEP *
+ enp->en_arch.ef10.ena_piobuf_count);
+
+ /* Link piobufs to extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_link_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i,
+ enp->en_arch.ef10.ena_piobuf_handle[i]);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ /*
+ * Allocate a vAdaptor attached to our upstream vPort/pPort.
+ *
+ * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF
+ * driver has yet to bring up the EVB port. See bug 56147. In this case,
+ * retry the request several times after waiting a while. The wait time
+ * between retries starts small (10ms) and exponentially increases.
+ * Total wait time is a little over two seconds. Retry logic in the
+ * client driver may mean this whole loop is repeated if it continues to
+ * fail.
+ */
+ retry = 0;
+ delay_us = 10000;
+ while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
+ (rc != ENOENT)) {
+ /*
+ * Do not retry alloc for PF, or for other errors on
+ * a VF.
+ */
+ goto fail5;
+ }
+
+ /* VF startup before PF is ready. Retry allocation. */
+ if (retry > 5) {
+ /* Too many attempts */
+ rc = EINVAL;
+ goto fail6;
+ }
+ EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
+ EFSYS_SLEEP(delay_us);
+ retry++;
+ if (delay_us < 500000)
+ delay_us <<= 2;
+ }
+
+ enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+
+ ef10_nic_free_piobufs(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
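+
+/*
+ * Worked example of the VI/BAR layout set up above (counts are
+ * hypothetical): with vi_count = 64 and ena_piobuf_count = 4, the PIO
+ * write VIs start at VI 60. The UC region spans VIs 0..59 (offset 0,
+ * size 60 * ER_DZ_TX_PIOBUF_STEP) and the WC region follows immediately,
+ * spanning VIs 60..63 (size 4 * ER_DZ_TX_PIOBUF_STEP), with each of the
+ * four piobufs linked to one of those VIs.
+ */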
+
+ __checkReturn efx_rc_t
+ef10_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *vi_countp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Report VIs that the client driver can use.
+ * Do not include VIs used for PIO buffer writes.
+ */
+ *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * TODO: Specify host memory mapping alignment and granularity
+ * in efx_drv_limits_t so that they can be taken into account
+ * when allocating extra VIs for PIO writes.
+ */
+ switch (region) {
+ case EFX_REGION_VI:
+ /* UC mapped memory BAR region for VI registers */
+ *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
+ break;
+
+ case EFX_REGION_PIO_WRITE_VI:
+ /* WC mapped memory BAR region for piobuf writes */
+ *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
+ *sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_nic_fini(
+ __in efx_nic_t *enp)
+{
+ uint32_t i;
+ efx_rc_t rc;
+
+ (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
+ enp->en_vport_id = 0;
+
+ /* Unlink piobufs from extra VIs in WC mapping */
+ if (enp->en_arch.ef10.ena_piobuf_count > 0) {
+ for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
+ rc = efx_mcdi_unlink_piobuf(enp,
+ enp->en_arch.ef10.ena_pio_write_vi_base + i);
+ if (rc != 0)
+ break;
+ }
+ }
+
+ ef10_nic_free_piobufs(enp);
+
+ (void) efx_mcdi_free_vis(enp);
+ enp->en_arch.ef10.ena_vi_count = 0;
+}
+
+ void
+ef10_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+ef10_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(enp))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_nvram.c b/drivers/net/sfc/base/ef10_nvram.c
new file mode 100644
index 00000000..3f9d3750
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_nvram.c
@@ -0,0 +1,2385 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+#include "ef10_tlv_layout.h"
+
+/* Cursor for TLV partition format */
+typedef struct tlv_cursor_s {
+ uint32_t *block; /* Base of data block */
+ uint32_t *current; /* Cursor position */
+ uint32_t *end; /* End tag position */
+ uint32_t *limit; /* Last dword of data block */
+} tlv_cursor_t;
+
+typedef struct nvram_partition_s {
+ uint16_t type;
+ uint8_t chip_select;
+ uint8_t flags;
+ /*
+ * The full length of the NVRAM partition.
+ * This is different from tlv_partition_header.total_length,
+ * which can be smaller.
+ */
+ uint32_t length;
+ uint32_t erase_size;
+ uint32_t *data;
+ tlv_cursor_t tlv_cursor;
+} nvram_partition_t;
+
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor);
+
+
+static void
+tlv_init_block(
+ __out uint32_t *block)
+{
+ *block = __CPU_TO_LE_32(TLV_TAG_END);
+}
+
+static uint32_t
+tlv_tag(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, tag;
+
+ dword = cursor->current[0];
+ tag = __LE_TO_CPU_32(dword);
+
+ return (tag);
+}
+
+static size_t
+tlv_length(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t dword, length;
+
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (0);
+
+ dword = cursor->current[1];
+ length = __LE_TO_CPU_32(dword);
+
+ return ((size_t)length);
+}
+
+static uint8_t *
+tlv_value(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)(&cursor->current[2]));
+}
+
+static uint8_t *
+tlv_item(
+ __in tlv_cursor_t *cursor)
+{
+ if (tlv_tag(cursor) == TLV_TAG_END)
+ return (NULL);
+
+ return ((uint8_t *)cursor->current);
+}
+
+/*
+ * TLV item DWORD length is tag + length + value (rounded up to a whole
+ * DWORD); equivalent to tlv_n_words_for_len in mc-comms tlv.c.
+ */
+#define TLV_DWORD_COUNT(length) \
+ (1 + 1 + (((length) + sizeof (uint32_t) - 1) / sizeof (uint32_t)))
+
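+/*
+ * Worked example: a TLV item with a 5 byte value occupies
+ * TLV_DWORD_COUNT(5) = 1 + 1 + 2 = 4 dwords (16 bytes), since the value
+ * is padded up to a whole dword. A zero-length item occupies just the
+ * tag and length dwords.
+ */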
+
+static uint32_t *
+tlv_next_item_ptr(
+ __in tlv_cursor_t *cursor)
+{
+ uint32_t length;
+
+ length = tlv_length(cursor);
+
+ return (cursor->current + TLV_DWORD_COUNT(length));
+}
+
+static __checkReturn efx_rc_t
+tlv_advance(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (cursor->current == cursor->end) {
+ /* No more tags after END tag */
+ cursor->current = NULL;
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ /* Advance to next item and validate */
+ cursor->current = tlv_next_item_ptr(cursor);
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_rewind(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ cursor->current = cursor->block;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_find(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag)
+{
+ efx_rc_t rc;
+
+ rc = tlv_rewind(cursor);
+ while (rc == 0) {
+ if (tlv_tag(cursor) == tag)
+ break;
+
+ rc = tlv_advance(cursor);
+ }
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_validate_state(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ /* Check cursor position */
+ if (cursor->current < cursor->block) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (cursor->current > cursor->limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (tlv_tag(cursor) != TLV_TAG_END) {
+ /* Check current item has space for tag and length */
+ if (cursor->current > (cursor->limit - 2)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ /* Check we have value data for current item and another tag */
+ if (tlv_next_item_ptr(cursor) > (cursor->limit - 1)) {
+ cursor->current = NULL;
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static efx_rc_t
+tlv_init_cursor(
+ __out tlv_cursor_t *cursor,
+ __in uint32_t *block,
+ __in uint32_t *limit,
+ __in uint32_t *current)
+{
+ cursor->block = block;
+ cursor->limit = limit;
+
+ cursor->current = current;
+ cursor->end = NULL;
+
+ return (tlv_validate_state(cursor));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_from_size(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size)
+{
+ uint32_t *limit;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ return (tlv_init_cursor(cursor, (uint32_t *)block,
+ limit, (uint32_t *)block));
+}
+
+static __checkReturn efx_rc_t
+tlv_init_cursor_at_offset(
+ __out tlv_cursor_t *cursor,
+ __in_bcount(size)
+ uint8_t *block,
+ __in size_t size,
+ __in size_t offset)
+{
+ uint32_t *limit;
+ uint32_t *current;
+ limit = (uint32_t *)(block + size - sizeof (uint32_t));
+ current = (uint32_t *)(block + offset);
+ return (tlv_init_cursor(cursor, (uint32_t *)block, limit, current));
+}
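+
+/*
+ * Illustrative sketch of how the cursor primitives above compose (it is
+ * not used by the driver and the helper name is hypothetical): walk a
+ * TLV formatted block, locate the first item with the given tag and
+ * return a pointer to its value.
+ */
+static uint8_t *
+tlv_find_value_sketch(
+	__in_bcount(size)	uint8_t *block,
+	__in			size_t size,
+	__in			uint32_t tag,
+	__out			size_t *lengthp)
+{
+	tlv_cursor_t cursor;
+
+	/* The cursor starts at the first item, which must be valid */
+	if (tlv_init_cursor_from_size(&cursor, block, size) != 0)
+		return (NULL);
+
+	/* tlv_find() rewinds and advances until the tag (or END) is found */
+	if (tlv_find(&cursor, tag) != 0)
+		return (NULL);
+
+	*lengthp = tlv_length(&cursor);
+	return (tlv_value(&cursor));
+}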
+
+static __checkReturn efx_rc_t
+tlv_require_end(
+ __inout tlv_cursor_t *cursor)
+{
+ uint32_t *pos;
+ efx_rc_t rc;
+
+ if (cursor->end == NULL) {
+ pos = cursor->current;
+ if ((rc = tlv_find(cursor, TLV_TAG_END)) != 0)
+ goto fail1;
+
+ cursor->end = cursor->current;
+ cursor->current = pos;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static size_t
+tlv_block_length_used(
+ __inout tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ /* Return space used (including the END tag) */
+ return (cursor->end + 1 - cursor->block) * sizeof (uint32_t);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static uint32_t *
+tlv_last_segment_end(
+ __in tlv_cursor_t *cursor)
+{
+ tlv_cursor_t segment_cursor;
+ uint32_t *last_segment_end = cursor->block;
+ uint32_t *segment_start = cursor->block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the pointer to its end tag.
+ */
+ for (;;) {
+ if (tlv_init_cursor(&segment_cursor, segment_start,
+ cursor->limit, segment_start) != 0)
+ break;
+ if (tlv_require_end(&segment_cursor) != 0)
+ break;
+ last_segment_end = segment_cursor.end;
+ segment_start = segment_cursor.end + 1;
+ }
+
+ return (last_segment_end);
+}
+
+
+static uint32_t *
+tlv_write(
+ __in tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size) uint8_t *data,
+ __in size_t size)
+{
+ uint32_t len = size;
+ uint32_t *ptr;
+
+ ptr = cursor->current;
+
+ *ptr++ = __CPU_TO_LE_32(tag);
+ *ptr++ = __CPU_TO_LE_32(len);
+
+ if (len > 0) {
+ ptr[(len - 1) / sizeof (uint32_t)] = 0;
+ memcpy(ptr, data, len);
+ ptr += P2ROUNDUP(len, sizeof (uint32_t)) / sizeof (*ptr);
+ }
+
+ return (ptr);
+}
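+
+/*
+ * For example (values hypothetical), writing an item with a 6 byte value
+ * emits four dwords: the tag, the length (6), and two value dwords. The
+ * final value dword is cleared before the memcpy() above, so the two
+ * padding bytes are always zero and the contents of the block (and hence
+ * its checksum) are deterministic.
+ */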
+
+static __checkReturn efx_rc_t
+tlv_insert(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail2;
+
+ if (tag == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ delta = TLV_DWORD_COUNT(size);
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
+ /* Move data up: new space at cursor->current */
+ memmove(cursor->current + delta, cursor->current,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ /* Write new TLV item */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_delete(
+ __inout tlv_cursor_t *cursor)
+{
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ delta = TLV_DWORD_COUNT(tlv_length(cursor));
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail3;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ /* Shuffle things down, destroying the item at cursor->current */
+ memmove(cursor->current, cursor->current + delta,
+ (last_segment_end + 1 - cursor->current) * sizeof (uint32_t));
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0, delta * sizeof (uint32_t));
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+tlv_modify(
+ __inout tlv_cursor_t *cursor,
+ __in uint32_t tag,
+ __in_bcount(size)
+ uint8_t *data,
+ __in size_t size)
+{
+ uint32_t *pos;
+ unsigned int old_ndwords;
+ unsigned int new_ndwords;
+ unsigned int delta;
+ uint32_t *last_segment_end;
+ efx_rc_t rc;
+
+ if ((rc = tlv_validate_state(cursor)) != 0)
+ goto fail1;
+
+ if (tlv_tag(cursor) == TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ if (tlv_tag(cursor) != tag) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ old_ndwords = TLV_DWORD_COUNT(tlv_length(cursor));
+ new_ndwords = TLV_DWORD_COUNT(size);
+
+ if ((rc = tlv_require_end(cursor)) != 0)
+ goto fail4;
+
+ last_segment_end = tlv_last_segment_end(cursor);
+
+ if (new_ndwords > old_ndwords) {
+ /* Expand space used for TLV item */
+ delta = new_ndwords - old_ndwords;
+ pos = cursor->current + old_ndwords;
+
+ if (last_segment_end + 1 + delta > cursor->limit) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Move up: new space at (cursor->current + old_ndwords) */
+ memmove(pos + delta, pos,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end += delta;
+
+ } else if (new_ndwords < old_ndwords) {
+ /* Shrink space used for TLV item */
+ delta = old_ndwords - new_ndwords;
+ pos = cursor->current + new_ndwords;
+
+ /* Move down: remove words at (cursor->current + new_ndwords) */
+ memmove(pos, pos + delta,
+ (last_segment_end + 1 - pos) * sizeof (uint32_t));
+
+ /* Zero the new space at the end of the TLV chain */
+ memset(last_segment_end + 1 - delta, 0,
+ delta * sizeof (uint32_t));
+
+ /* Adjust the end pointer */
+ cursor->end -= delta;
+ }
+
+ /* Write new data */
+ tlv_write(cursor, tag, data, size);
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t checksum_tlv_partition(
+ __in nvram_partition_t *partition)
+{
+ tlv_cursor_t *cursor;
+ uint32_t *ptr;
+ uint32_t *end;
+ uint32_t csum;
+ size_t len;
+
+ cursor = &partition->tlv_cursor;
+ len = tlv_block_length_used(cursor);
+ EFSYS_ASSERT3U((len & 3), ==, 0);
+
+ csum = 0;
+ ptr = partition->data;
+ end = &ptr[len >> 2];
+
+ while (ptr < end)
+ csum += __LE_TO_CPU_32(*ptr++);
+
+ return (csum);
+}
+
+static __checkReturn efx_rc_t
+tlv_update_partition_len_and_cks(
+ __in tlv_cursor_t *cursor)
+{
+ efx_rc_t rc;
+ nvram_partition_t partition;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t new_len;
+
+ /*
+ * We just modified the partition, so the total length may not be
+ * valid. Don't use tlv_find(), which performs some sanity checks
+ * that may fail here.
+ */
+ partition.data = cursor->block;
+ memcpy(&partition.tlv_cursor, cursor, sizeof (*cursor));
+ header = (struct tlv_partition_header *)partition.data;
+ /* Sanity check. */
+ if (__LE_TO_CPU_32(header->tag) != TLV_TAG_PARTITION_HEADER) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ new_len = tlv_block_length_used(&partition.tlv_cursor);
+ if (new_len == 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ header->total_length = __CPU_TO_LE_32(new_len);
+ /* Ensure the modified partition always has a new generation count. */
+ header->generation = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(header->generation) + 1);
+
+ trailer = (struct tlv_partition_trailer *)((uint8_t *)header +
+ new_len - sizeof (*trailer) - sizeof (uint32_t));
+ trailer->generation = header->generation;
+ trailer->checksum = __CPU_TO_LE_32(
+ __LE_TO_CPU_32(trailer->checksum) -
+ checksum_tlv_partition(&partition));
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
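+
+/*
+ * Illustrative check of the checksum invariant maintained above (it is not
+ * used by the driver and the helper name is hypothetical): after
+ * tlv_update_partition_len_and_cks(), the sum of every little-endian
+ * dword from the partition header up to and including the END tag is
+ * zero, which is what the validation paths below rely on.
+ */
+static boolean_t
+tlv_partition_cksum_is_zero_sketch(
+	__in_bcount(length)	caddr_t data,
+	__in			size_t length)
+{
+	uint32_t csum = 0;
+	size_t pos;
+
+	/* length must cover the whole segment and be a multiple of 4 */
+	for (pos = 0; pos < length; pos += sizeof (uint32_t))
+		csum += __LE_TO_CPU_32(*((uint32_t *)(data + pos)));
+
+	return ((csum == 0) ? B_TRUE : B_FALSE);
+}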
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_validate(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((partn_data == NULL) || (partn_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* The partition header must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)partn_data,
+ partn_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV partition length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > partn_size) {
+ rc = EFBIG;
+ goto fail4;
+ }
+
+ /* Check partition ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /* Check generation counts are consistent */
+ if (trailer->generation != header->generation) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ /* Verify partition checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(partn_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_create(
+ __in efx_nic_t *enp,
+ __in uint16_t partn_type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ uint32_t *buf = (uint32_t *)partn_data;
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header header;
+ struct tlv_partition_trailer trailer;
+
+ unsigned int min_buf_size = sizeof (struct tlv_partition_header) +
+ sizeof (struct tlv_partition_trailer);
+ if (partn_size < min_buf_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ memset(buf, 0xff, partn_size);
+
+ tlv_init_block(buf);
+ if ((rc = tlv_init_cursor(&cursor, buf,
+ (uint32_t *)((uint8_t *)buf + partn_size),
+ buf)) != 0) {
+ goto fail2;
+ }
+
+ header.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_HEADER);
+ header.length = __CPU_TO_LE_32(sizeof (header) - 8);
+ header.type_id = __CPU_TO_LE_16(partn_type);
+ header.preset = 0;
+ header.generation = __CPU_TO_LE_32(1);
+ header.total_length = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(
+ &cursor, TLV_TAG_PARTITION_HEADER,
+ (uint8_t *)&header.type_id, sizeof (header) - 8)) != 0)
+ goto fail3;
+ if ((rc = tlv_advance(&cursor)) != 0)
+ goto fail4;
+
+ trailer.tag = __CPU_TO_LE_32(TLV_TAG_PARTITION_TRAILER);
+ trailer.length = __CPU_TO_LE_32(sizeof (trailer) - 8);
+ trailer.generation = header.generation;
+ trailer.checksum = 0; /* This will be fixed below. */
+ if ((rc = tlv_insert(&cursor, TLV_TAG_PARTITION_TRAILER,
+ (uint8_t *)&trailer.generation, sizeof (trailer) - 8)) != 0)
+ goto fail5;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail6;
+
+ /* Check that the partition is valid. */
+ if ((rc = ef10_nvram_buffer_validate(enp, partn_type,
+ partn_data, partn_size)) != 0)
+ goto fail7;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint32_t
+byte_offset(
+ __in uint32_t *position,
+ __in uint32_t *base)
+{
+ return (uint32_t)((uint8_t *)position - (uint8_t *)base);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_item_start(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp)
+{
+ /* Read past partition header to find start address of the first key */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_find_end(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp)
+{
+ /* Read to end of partition */
+ tlv_cursor_t cursor;
+ efx_rc_t rc;
+ uint32_t *segment_used;
+
+ _NOTE(ARGUNUSED(offset))
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ segment_used = cursor.block;
+
+ /*
+ * Go through each segment and check that it has an end tag. If there
+ * is no end tag then the previous segment was the last valid one,
+ * so return the used space including that end tag.
+ */
+ while (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ if (tlv_require_end(&cursor) != 0) {
+ if (segment_used == cursor.block) {
+ /*
+ * First segment is corrupt, so there is
+ * no valid data in partition.
+ */
+ rc = EINVAL;
+ goto fail2;
+ }
+ break;
+ }
+ segment_used = cursor.end + 1;
+
+ cursor.current = segment_used;
+ }
+ /* Return space used (including the END tag) */
+ *endp = (segment_used - cursor.block) * sizeof (uint32_t);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+ef10_nvram_buffer_find_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp)
+{
+ /* Find TLV at offset and return key start and length */
+ tlv_cursor_t cursor;
+ uint8_t *key;
+ uint32_t tag;
+
+ if (tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset) != 0) {
+ return (B_FALSE);
+ }
+
+ while ((key = tlv_item(&cursor)) != NULL) {
+ tag = tlv_tag(&cursor);
+ if (tag == TLV_TAG_PARTITION_HEADER ||
+ tag == TLV_TAG_PARTITION_TRAILER) {
+ if (tlv_advance(&cursor) != 0) {
+ break;
+ }
+ continue;
+ }
+ *startp = byte_offset(cursor.current, cursor.block);
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_get_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(item_max_size, *lengthp)
+ caddr_t itemp,
+ __in size_t item_max_size,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ uint32_t item_length;
+
+ if (item_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail2;
+ }
+
+ item_length = tlv_length(&cursor);
+ if (length < item_length) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(itemp, tlv_value(&cursor), item_length);
+
+ *lengthp = item_length;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_insert_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ rc = tlv_insert(&cursor, TLV_TAG_LICENSE, (uint8_t *)keyp, length);
+
+ if (rc != 0) {
+ goto fail2;
+ }
+
+ *lengthp = byte_offset(tlv_next_item_ptr(&cursor),
+ cursor.current);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_delete_item(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ _NOTE(ARGUNUSED(length, end))
+
+ if ((rc = tlv_init_cursor_at_offset(&cursor, (uint8_t *)bufferp,
+ buffer_size, offset)) != 0) {
+ goto fail1;
+ }
+
+ if ((rc = tlv_delete(&cursor)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_buffer_finish(
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)bufferp,
+ buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ if ((rc = tlv_require_end(&cursor)) != 0)
+ goto fail2;
+
+ if ((rc = tlv_update_partition_len_and_cks(&cursor)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+
+/*
+ * Read and validate a segment from a partition. A segment is a complete
+ * TLV chain between the PARTITION_HEADER tag and the END tag. There may
+ * be multiple segments in a partition, so seg_offset allows segments
+ * beyond the first to be read.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_read_tlv_segment(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in size_t seg_offset,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ size_t total_length;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(sizeof (*header) <= EF10_NVRAM_CHUNK);
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Read initial chunk of the segment, starting at offset */
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn, seg_offset, seg_data,
+ EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0) {
+ goto fail2;
+ }
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ total_length = __LE_TO_CPU_32(header->total_length);
+ if (total_length > max_seg_size) {
+ rc = EFBIG;
+ goto fail5;
+ }
+
+ /* Read the remaining segment content */
+ if (total_length > EF10_NVRAM_CHUNK) {
+ if ((rc = ef10_nvram_partn_read_mode(enp, partn,
+ seg_offset + EF10_NVRAM_CHUNK,
+ seg_data + EF10_NVRAM_CHUNK,
+ total_length - EF10_NVRAM_CHUNK,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT)) != 0)
+ goto fail6;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail8;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Check data read from segment is consistent */
+ if (trailer->generation != header->generation) {
+ /*
+ * The partition data may have been modified between successive
+ * MCDI NVRAM_READ requests by the MC or another PCI function.
+ *
+ * The caller must retry to obtain consistent partition data.
+ */
+ rc = EAGAIN;
+ goto fail10;
+ }
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < total_length; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ return (0);
+
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Read a single TLV item from a host memory
+ * buffer containing a TLV formatted segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_read_tlv(
+ __in efx_nic_t *enp,
+ __in_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*sizep) caddr_t *datap,
+ __out size_t *sizep)
+{
+ tlv_cursor_t cursor;
+ caddr_t data;
+ size_t length;
+ caddr_t value;
+ efx_rc_t rc;
+
+ if ((seg_data == NULL) || (max_seg_size == 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Find requested TLV tag in segment data */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ if ((rc = tlv_find(&cursor, tag)) != 0) {
+ rc = ENOENT;
+ goto fail3;
+ }
+ value = (caddr_t)tlv_value(&cursor);
+ length = tlv_length(&cursor);
+
+ if (length == 0)
+ data = NULL;
+ else {
+ /* Copy out data from TLV item */
+ EFSYS_KMEM_ALLOC(enp->en_esip, length, data);
+ if (data == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memcpy(data, value, length);
+ }
+
+ *datap = data;
+ *sizep = length;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Read a single TLV item from the first segment in a TLV formatted partition */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __deref_out_bcount_opt(*seg_sizep) caddr_t *seg_datap,
+ __out size_t *seg_sizep)
+{
+ caddr_t seg_data = NULL;
+ size_t partn_size = 0;
+ size_t length;
+ caddr_t data;
+ int retry;
+ efx_rc_t rc;
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ if (partn_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, seg_data);
+ if (seg_data == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /*
+ * Read the first segment in a TLV partition. Retry until consistent
+ * segment contents are returned. Inconsistent data may be read if:
+ * a) the segment contents are invalid
+ * b) the MC has rebooted while we were reading the partition
+ * c) the partition has been modified while we were reading it
+ * Limit retry attempts to ensure forward progress.
+ */
+ retry = 10;
+ do {
+ rc = ef10_nvram_read_tlv_segment(enp, partn, 0,
+ seg_data, partn_size);
+ } while ((rc == EAGAIN) && (--retry > 0));
+
+ if (rc != 0) {
+ /* Failed to obtain consistent segment data */
+ goto fail4;
+ }
+
+ if ((rc = ef10_nvram_buf_read_tlv(enp, seg_data, partn_size,
+ tag, &data, &length)) != 0)
+ goto fail5;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+
+ *seg_datap = data;
+ *seg_sizep = length;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, seg_data);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Compute the size of a segment. */
+ static __checkReturn efx_rc_t
+ef10_nvram_buf_segment_size(
+ __in caddr_t seg_data,
+ __in size_t max_seg_size,
+ __out size_t *seg_sizep)
+{
+ efx_rc_t rc;
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ uint32_t cksum;
+ int pos;
+ uint32_t *end_tag_position;
+ uint32_t segment_length;
+
+ /* A PARTITION_HEADER tag must be the first item at the given offset */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Check TLV segment length (includes the END tag) */
+ *seg_sizep = __LE_TO_CPU_32(header->total_length);
+ if (*seg_sizep > max_seg_size) {
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /* Check segment ends with PARTITION_TRAILER and END tags */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ if ((rc = tlv_advance(&cursor)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_END) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ end_tag_position = cursor.current;
+
+ /* Verify segment checksum */
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *seg_sizep; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ /*
+ * Calculate total length from HEADER to END tags and compare to
+ * max_seg_size and the total_length field in the HEADER tag.
+ */
+ segment_length = tlv_block_length_used(&cursor);
+
+ if (segment_length > max_seg_size) {
+ rc = EINVAL;
+ goto fail8;
+ }
+
+ if (segment_length != *seg_sizep) {
+ rc = EINVAL;
+ goto fail9;
+ }
+
+ /* Skip over the first HEADER tag. */
+ rc = tlv_rewind(&cursor);
+ rc = tlv_advance(&cursor);
+
+ while (rc == 0) {
+ if (tlv_tag(&cursor) == TLV_TAG_END) {
+ /* Check that the END tag is the one found earlier. */
+			if (cursor.current != end_tag_position) {
+				rc = EINVAL;
+				goto fail10;
+			}
+ break;
+ }
+ /* Check for duplicate HEADER tags before the END tag. */
+ if (tlv_tag(&cursor) == TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail11;
+ }
+
+ rc = tlv_advance(&cursor);
+ }
+ if (rc != 0)
+ goto fail12;
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
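+
+/*
+ * Illustrative layout of a single TLV segment, inferred from the checks in
+ * ef10_nvram_buf_segment_size() above and the header/trailer updates in
+ * ef10_nvram_buf_write_tlv() below (a sketch, not a normative definition):
+ *
+ *	+---------------------+  offset 0
+ *	| PARTITION_HEADER    |  total_length, generation
+ *	+---------------------+
+ *	| TLV item(s)         |  e.g. PARTITION_VERSION, configuration tags
+ *	+---------------------+
+ *	| PARTITION_TRAILER   |  generation, checksum
+ *	+---------------------+
+ *	| END                 |
+ *	+---------------------+  offset total_length
+ *
+ * The trailer checksum is chosen so that the 32-bit words of the whole
+ * segment sum to zero.
+ */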
+
+/*
+ * Add or update a single TLV item in a host memory buffer containing a TLV
+ * formatted segment. Historically partitions consisted of only one segment.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_buf_write_tlv(
+ __inout_bcount(max_seg_size) caddr_t seg_data,
+ __in size_t max_seg_size,
+ __in uint32_t tag,
+ __in_bcount(tag_size) caddr_t tag_data,
+ __in size_t tag_size,
+ __out size_t *total_lengthp)
+{
+ tlv_cursor_t cursor;
+ struct tlv_partition_header *header;
+ struct tlv_partition_trailer *trailer;
+ uint32_t generation;
+ uint32_t cksum;
+ int pos;
+ efx_rc_t rc;
+
+ /* A PARTITION_HEADER tag must be the first item (at offset zero) */
+ if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
+ max_seg_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ if (tlv_tag(&cursor) != TLV_TAG_PARTITION_HEADER) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ header = (struct tlv_partition_header *)tlv_item(&cursor);
+
+ /* Update the TLV chain to contain the new data */
+ if ((rc = tlv_find(&cursor, tag)) == 0) {
+ /* Modify existing TLV item */
+ if ((rc = tlv_modify(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0)
+ goto fail3;
+ } else {
+ /* Insert a new TLV item before the PARTITION_TRAILER */
+ rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER);
+ if (rc != 0) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ if ((rc = tlv_insert(&cursor, tag,
+ (uint8_t *)tag_data, tag_size)) != 0) {
+ rc = EINVAL;
+ goto fail5;
+ }
+ }
+
+ /* Find the trailer tag */
+ if ((rc = tlv_find(&cursor, TLV_TAG_PARTITION_TRAILER)) != 0) {
+ rc = EINVAL;
+ goto fail6;
+ }
+ trailer = (struct tlv_partition_trailer *)tlv_item(&cursor);
+
+ /* Update PARTITION_HEADER and PARTITION_TRAILER fields */
+ *total_lengthp = tlv_block_length_used(&cursor);
+ if (*total_lengthp > max_seg_size) {
+ rc = ENOSPC;
+ goto fail7;
+ }
+ generation = __LE_TO_CPU_32(header->generation) + 1;
+
+ header->total_length = __CPU_TO_LE_32(*total_lengthp);
+ header->generation = __CPU_TO_LE_32(generation);
+ trailer->generation = __CPU_TO_LE_32(generation);
+
+	/*
+	 * Recompute the PARTITION_TRAILER checksum: the two's complement of
+	 * the 32-bit word sum of the rest of the segment, so that the whole
+	 * segment sums to zero (as verified by ef10_nvram_buf_segment_size).
+	 */
+ trailer->checksum = 0;
+ cksum = 0;
+ for (pos = 0; (size_t)pos < *total_lengthp; pos += sizeof (uint32_t)) {
+ cksum += *((uint32_t *)(seg_data + pos));
+ }
+ trailer->checksum = ~cksum + 1;
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in the first segment of a TLV formatted
+ * dynamic config partition. The first segment is the current active
+ * configuration.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ return ef10_nvram_partn_write_segment_tlv(enp, partn, tag, data,
+ size, B_FALSE);
+}
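+
+/*
+ * Illustrative caller sketch (hypothetical tag, error handling omitted; not
+ * a helper defined in this file): read a TLV item from the dynamic config
+ * partition, then write back an updated value and free the returned buffer.
+ *
+ *	caddr_t data;
+ *	size_t size;
+ *
+ *	if (ef10_nvram_partn_read_tlv(enp,
+ *	    NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, tag, &data, &size) == 0) {
+ *		(void) ef10_nvram_partn_write_tlv(enp,
+ *		    NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, tag, data, size);
+ *		EFSYS_KMEM_FREE(enp->en_esip, size, data);
+ *	}
+ */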
+
+/*
+ * Read a segment from NVRAM at the given offset into a buffer (segment_data)
+ * and optionally write a new tag to it.
+ */
+static __checkReturn efx_rc_t
+ef10_nvram_segment_write_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout caddr_t *seg_datap,
+ __inout size_t *partn_offsetp,
+ __inout size_t *src_remain_lenp,
+ __inout size_t *dest_remain_lenp,
+ __in boolean_t write)
+{
+ efx_rc_t rc;
+ efx_rc_t status;
+ size_t original_segment_size;
+ size_t modified_segment_size;
+
+ /*
+	 * Read the segment from NVRAM into the segment_data buffer and
+	 * validate it, returning an error if validation fails. Failure is
+	 * only fatal for the first segment in a partition; in that case the
+	 * caller must propagate the error.
+ */
+ status = ef10_nvram_read_tlv_segment(enp, partn, *partn_offsetp,
+ *seg_datap, *src_remain_lenp);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ status = ef10_nvram_buf_segment_size(*seg_datap,
+ *src_remain_lenp, &original_segment_size);
+ if (status != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ if (write) {
+ /* Update the contents of the segment in the buffer */
+ if ((rc = ef10_nvram_buf_write_tlv(*seg_datap,
+ *dest_remain_lenp, tag, data, size,
+ &modified_segment_size)) != 0) {
+ goto fail3;
+ }
+ *dest_remain_lenp -= modified_segment_size;
+ *seg_datap += modified_segment_size;
+ } else {
+ /*
+ * We won't modify this segment, but still need to update the
+ * remaining lengths and pointers.
+ */
+ *dest_remain_lenp -= original_segment_size;
+ *seg_datap += original_segment_size;
+ }
+
+ *partn_offsetp += original_segment_size;
+ *src_remain_lenp -= original_segment_size;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Add or update a single TLV item in either the first segment or in all
+ * segments in a TLV formatted dynamic config partition. Dynamic config
+ * partitions on boards that support RFID are divided into a number of segments,
+ * each formatted like a partition, with header, trailer and end tags. The first
+ * segment is the current active configuration.
+ *
+ * The segments are initialised by manftest and each contain a different
+ * configuration, e.g. a firmware variant. The firmware can be instructed
+ * via RFID to copy a segment to replace the first segment, hence changing
+ * the active configuration. This allows the configuration of a board to be
+ * changed via RFID prior to shipment.
+ *
+ * Changes to the dynamic config may need to be written to all segments (e.g.
+ * firmware versions) or just the first segment (changes to the active
+ * configuration). See SF-111324-SW "The use of RFID in Solarflare Products".
+ * If only the first segment is written, the code still needs to be aware of the
+ * possible presence of subsequent segments as writing to a segment may cause
+ * its size to increase, which would overwrite the subsequent segments and
+ * invalidate them.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write_segment_tlv(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t tag,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t all_segments)
+{
+ size_t partn_size = 0;
+ caddr_t partn_data;
+ size_t total_length = 0;
+ efx_rc_t rc;
+ size_t current_offset = 0;
+ size_t remaining_original_length;
+ size_t remaining_modified_length;
+ caddr_t segment_data;
+
+ EFSYS_ASSERT3U(partn, ==, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG);
+
+ /* Allocate sufficient memory for the entire partition */
+ if ((rc = ef10_nvram_partn_size(enp, partn, &partn_size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_size, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ remaining_original_length = partn_size;
+ remaining_modified_length = partn_size;
+ segment_data = partn_data;
+
+ /* Lock the partition */
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail3;
+
+ /* Iterate over each (potential) segment to update it. */
+ do {
+ boolean_t write = all_segments || current_offset == 0;
+
+ rc = ef10_nvram_segment_write_tlv(enp, partn, tag, data, size,
+ &segment_data, &current_offset, &remaining_original_length,
+ &remaining_modified_length, write);
+ if (rc != 0) {
+ if (current_offset == 0) {
+ /*
+ * If no data has been read then the first
+ * segment is invalid, which is an error.
+ */
+ goto fail4;
+ }
+ break;
+ }
+ } while (current_offset < partn_size);
+
+ total_length = segment_data - partn_data;
+
+ /*
+ * We've run out of space. This should actually be dealt with by
+ * ef10_nvram_buf_write_tlv returning ENOSPC.
+ */
+ if (total_length > partn_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Erase the whole partition in NVRAM */
+ if ((rc = ef10_nvram_partn_erase(enp, partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write new partition contents from the buffer to NVRAM */
+ if ((rc = ef10_nvram_partn_write(enp, partn, 0, partn_data,
+ total_length)) != 0)
+ goto fail7;
+
+ /* Unlock the partition */
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ ef10_nvram_partn_unlock(enp, partn, NULL);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_size, partn_data);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Get the size of an NVRAM partition. This is the total size allocated in
+ * NVRAM, not the size of the data used by the segments in the partition.
+ */
+ __checkReturn efx_rc_t
+ef10_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, EF10_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset,
+ data, chunk, mode)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ /*
+ * Read requests which come in through the EFX API expect to
+ * read the current, active partition.
+ */
+ return ef10_nvram_partn_read_mode(enp, partn, offset, data, size,
+ MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+ uint32_t erase_size;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ &erase_size, NULL)) != 0)
+ goto fail1;
+
+ if (erase_size == 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0)
+ goto fail2;
+ } else {
+ if (size % erase_size != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ while (size > 0) {
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset,
+ erase_size)) != 0)
+ goto fail4;
+ offset += erase_size;
+ size -= erase_size;
+ }
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ uint32_t write_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, NULL, NULL,
+ NULL, &write_size)) != 0)
+ goto fail1;
+
+ if (write_size != 0) {
+ /*
+ * Check that the size is a multiple of the write chunk size if
+ * the write chunk size is available.
+ */
+ if (size % write_size != 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ } else {
+ write_size = EF10_NVRAM_CHUNK;
+ }
+
+ while (size > 0) {
+ chunk = MIN(size, write_size);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail3;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt uint32_t *resultp)
+{
+ boolean_t reboot = B_FALSE;
+ efx_rc_t rc;
+
+ if (resultp != NULL)
+ *resultp = MC_CMD_NVRAM_VERIFY_RC_UNKNOWN;
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, resultp);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ struct tlv_partition_version partn_version;
+ size_t size;
+ efx_rc_t rc;
+
+ /* Add or modify partition version TLV item */
+ partn_version.version_w = __CPU_TO_LE_16(version[0]);
+ partn_version.version_x = __CPU_TO_LE_16(version[1]);
+ partn_version.version_y = __CPU_TO_LE_16(version[2]);
+ partn_version.version_z = __CPU_TO_LE_16(version[3]);
+
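+	/* Exclude the leading TLV tag and length words from the item value */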
+ size = sizeof (partn_version) - (2 * sizeof (uint32_t));
+
+ /* Write the version number to all segments in the partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ TLV_TAG_PARTITION_VERSION(partn),
+ (caddr_t)&partn_version.version_w, size, B_TRUE)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct ef10_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} ef10_parttbl_entry_t;
+
+/* Translate EFX NVRAM types to firmware partition types */
+static ef10_parttbl_entry_t hunt_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE}
+};
+
+static ef10_parttbl_entry_t medford_parttbl[] = {
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 1, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 2, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 3, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE, 4, EFX_NVRAM_MC_FIRMWARE},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 3, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 4, EFX_NVRAM_MC_GOLDEN},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 1, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 2, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 3, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_ROM, 4, EFX_NVRAM_BOOTROM},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 2, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 3, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 4, EFX_NVRAM_BOOTROM_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 1, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 2, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 3, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 4, EFX_NVRAM_DYNAMIC_CFG},
+ {NVRAM_PARTITION_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 3, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA, 4, EFX_NVRAM_FPGA},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 3, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_FPGA_BACKUP, 4, EFX_NVRAM_FPGA_BACKUP},
+ {NVRAM_PARTITION_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 3, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_LICENSE, 4, EFX_NVRAM_LICENSE},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 1, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 2, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 3, EFX_NVRAM_UEFIROM},
+ {NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 4, EFX_NVRAM_UEFIROM}
+};
+
+static __checkReturn efx_rc_t
+ef10_parttbl_get(
+ __in efx_nic_t *enp,
+ __out ef10_parttbl_entry_t **parttblp,
+ __out size_t *parttbl_rowsp)
+{
+ switch (enp->en_family) {
+ case EFX_FAMILY_HUNTINGTON:
+ *parttblp = hunt_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(hunt_parttbl);
+ break;
+
+ case EFX_FAMILY_MEDFORD:
+ *parttblp = medford_parttbl;
+ *parttbl_rowsp = EFX_ARRAY_SIZE(medford_parttbl);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (EINVAL);
+ }
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->nvtype == type &&
+ entry->port == emip->emi_port) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_DIAG
+
+static __checkReturn efx_rc_t
+ef10_nvram_partn_to_type(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out efx_nvram_type_t *typep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ ef10_parttbl_entry_t *parttbl = NULL;
+ size_t parttbl_rows = 0;
+ unsigned int i;
+
+ EFSYS_ASSERT(typep != NULL);
+
+ if (ef10_parttbl_get(enp, &parttbl, &parttbl_rows) == 0) {
+ for (i = 0; i < parttbl_rows; i++) {
+ ef10_parttbl_entry_t *entry = &parttbl[i];
+
+ if (entry->partn == partn &&
+ entry->port == emip->emi_port) {
+ *typep = entry->nvtype;
+ return (0);
+ }
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_nvram_type_t type;
+ unsigned int npartns = 0;
+ uint32_t *partns = NULL;
+ size_t size;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /* Read available partitions from NVRAM partition map */
+ size = MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM * sizeof (uint32_t);
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, partns);
+ if (partns == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_partitions(enp, (caddr_t)partns, size,
+ &npartns)) != 0) {
+ goto fail2;
+ }
+
+ for (i = 0; i < npartns; i++) {
+ /* Check if the partition is supported for this port */
+ if ((rc = ef10_nvram_partn_to_type(enp, partns[i], &type)) != 0)
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, partns[i])) != 0)
+ goto fail3;
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, size, partns);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ efx_rc_t rc;
+
+ /* FIXME: get highest partn version from all ports */
+ /* FIXME: return partn description if available */
+
+ if ((rc = efx_mcdi_nvram_metadata(enp, partn, subtypep,
+ version, NULL, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = EF10_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_partn_unlock(enp, partn, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_phy.c b/drivers/net/sfc/base/ef10_phy.c
new file mode 100644
index 00000000..81309f29
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_phy.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static void
+mcdi_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_40000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+mcdi_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 40000 && fd)
+ *link_modep = EFX_LINK_40000FDX;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_GENERATE)
+ *fcntlp = EFX_FCNTL_GENERATE;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+
+ void
+ef10_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_40G:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ mcdi_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ mcdi_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = ef10_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_get_link(
+ __in efx_nic_t *enp,
+ __out ef10_link_state_t *elsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &elsp->els_adv_cap_mask);
+ mcdi_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &elsp->els_lp_cap_mask);
+
+ mcdi_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &elsp->els_link_mode, &elsp->els_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert that the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ elsp->els_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ elsp->els_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN)];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ boolean_t supported;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_link_control_supported(enp, &supported)) != 0)
+ goto fail1;
+ if (supported == B_FALSE)
+ goto out;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+	/* Too many fields for POPULATE macros, so insert this afterwards */
+ MCDI_IN_SET_DWORD_FIELD(req, SET_LINK_IN_CAP,
+ PHY_CAP_40000FDX, (cap_mask >> EFX_PHY_CAP_40000FDX) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ case EFX_LINK_40000FDX:
+ speed = 40000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+out:
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+ __checkReturn efx_rc_t
+ef10_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ /* TBD: no stats support in firmware yet */
+ _NOTE(ARGUNUSED(enp, esmp))
+ memset(stat, 0, EFX_PHY_NSTATS * sizeof (*stat));
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+ef10_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_enable_offline(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ uint32_t result;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(type))
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ if (result == MC_CMD_POLL_BIST_FAILED &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MEM_LEN &&
+ count > EFX_BIST_MEM_ECC_FATAL) {
+ if (valuesp != NULL) {
+ valuesp[EFX_BIST_MEM_TEST] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_TEST);
+ valuesp[EFX_BIST_MEM_ADDR] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ADDR);
+ valuesp[EFX_BIST_MEM_BUS] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_BUS);
+ valuesp[EFX_BIST_MEM_EXPECT] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_EXPECT);
+ valuesp[EFX_BIST_MEM_ACTUAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ACTUAL);
+ valuesp[EFX_BIST_MEM_ECC] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC);
+ valuesp[EFX_BIST_MEM_ECC_PARITY] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_PARITY);
+ valuesp[EFX_BIST_MEM_ECC_FATAL] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MEM_ECC_FATAL);
+ }
+ value_mask |= (1 << EFX_BIST_MEM_TEST) |
+ (1 << EFX_BIST_MEM_ADDR) |
+ (1 << EFX_BIST_MEM_BUS) |
+ (1 << EFX_BIST_MEM_EXPECT) |
+ (1 << EFX_BIST_MEM_ACTUAL) |
+ (1 << EFX_BIST_MEM_ECC) |
+ (1 << EFX_BIST_MEM_ECC_PARITY) |
+ (1 << EFX_BIST_MEM_ECC_FATAL);
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_XFI_FARMI &&
+ req.emr_out_length >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on EF10. */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_rx.c b/drivers/net/sfc/base/ef10_rx.c
new file mode 100644
index 00000000..b65faedd
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_rx.c
@@ -0,0 +1,965 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in efsys_mem_t *esmp,
+ __in boolean_t disable_scatter,
+ __in uint32_t ps_bufsize)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_RXQ_EXT_IN_LEN,
+ MC_CMD_INIT_RXQ_EXT_OUT_LEN)];
+ int npages = EFX_RXQ_NBUFS(size);
+ int i;
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ efx_rc_t rc;
+ uint32_t dma_mode;
+
+ /* If this changes, then the payload size might need to change. */
+ EFSYS_ASSERT3U(MC_CMD_INIT_RXQ_OUT_LEN, ==, 0);
+ EFSYS_ASSERT3U(size, <=, EFX_RXQ_MAXNDESCS);
+
+ if (ps_bufsize > 0)
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM;
+ else
+ dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_RXQ_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_RXQ_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance);
+ MCDI_IN_POPULATE_DWORD_8(req, INIT_RXQ_EXT_IN_FLAGS,
+ INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0,
+ INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0,
+ INIT_RXQ_EXT_IN_CRC_MODE, 0,
+ INIT_RXQ_EXT_IN_FLAG_PREFIX, 1,
+ INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, disable_scatter,
+ INIT_RXQ_EXT_IN_DMA_MODE,
+ dma_mode,
+ INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, ps_bufsize);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_rxq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_RXQ_IN_LEN,
+ MC_CMD_FINI_RXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_RXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+efx_mcdi_rss_context_alloc(
+ __in efx_nic_t *enp,
+ __in efx_rx_scale_support_t scale_support,
+ __in uint32_t num_queues,
+ __out uint32_t *rss_contextp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN,
+ MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)];
+ uint32_t rss_context;
+ uint32_t context_type;
+ efx_rc_t rc;
+
+ if (num_queues > EFX_MAXRSS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (scale_support) {
+ case EFX_RX_SCALE_EXCLUSIVE:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE;
+ break;
+ case EFX_RX_SCALE_SHARED:
+ context_type = MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_ALLOC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_TYPE, context_type);
+ /* NUM_QUEUES is only used to validate indirection table offsets */
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, num_queues);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ rss_context = MCDI_OUT_DWORD(req, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ *rss_contextp = rss_context;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_free(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_FREE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_FREE_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_FREE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_FREE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_FREE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID, rss_context);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_flags(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in efx_rx_hash_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_FLAGS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ MCDI_IN_POPULATE_DWORD_4(req, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN,
+ (type & EFX_RX_HASH_IPV4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN,
+ (type & EFX_RX_HASH_TCPIPV4) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN,
+ (type & EFX_RX_HASH_IPV6) ? 1 : 0,
+ RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN,
+ (type & EFX_RX_HASH_TCPIPV6) ? 1 : 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_key(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN)];
+ efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_KEY;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ EFSYS_ASSERT3U(n, ==, MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ if (n != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ memcpy(MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY),
+ key, n);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+static efx_rc_t
+efx_mcdi_rss_context_set_table(
+ __in efx_nic_t *enp,
+ __in uint32_t rss_context,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN,
+ MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN)];
+ uint8_t *req_table;
+	int i;
+	efx_rc_t rc;
+
+ if (rss_context == EF10_RSS_CONTEXT_INVALID) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_RSS_CONTEXT_SET_TABLE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ rss_context);
+
+ req_table =
+ MCDI_IN2(req, uint8_t, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+
+ for (i = 0;
+ i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN;
+ i++) {
+ req_table[i] = (n > 0) ? (uint8_t)table[i % n] : 0;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+
+ __checkReturn efx_rc_t
+ef10_rx_init(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+
+ if (efx_mcdi_rss_context_alloc(enp, EFX_RX_SCALE_EXCLUSIVE, EFX_MAXRSS,
+ &enp->en_rss_context) == 0) {
+ /*
+ * Allocated an exclusive RSS context, which allows both the
+ * indirection table and key to be modified.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+ } else {
+ /*
+ * Failed to allocate an exclusive RSS context. Continue
+ * operation without support for RSS. The pseudo-header in
+ * received packets will not contain a Toeplitz hash value.
+ */
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+ enp->en_hash_support = EFX_RX_HASH_UNAVAILABLE;
+ }
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+ef10_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ _NOTE(ARGUNUSED(enp, buf_size))
+ return (0);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(alg, ==, EFX_RX_HASHALG_TOEPLITZ);
+ EFSYS_ASSERT3U(insert, ==, B_TRUE);
+
+ if ((alg != EFX_RX_HASHALG_TOEPLITZ) || (insert == B_FALSE)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_flags(enp,
+ enp->en_rss_context, type)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_key(enp,
+ enp->en_rss_context, key, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+ef10_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_rc_t rc;
+
+ if (enp->en_rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_rss_context_set_table(enp,
+ enp->en_rss_context, table, n)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
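+
+/*
+ * Illustrative RSS configuration sketch (the key/table variables and their
+ * lengths are hypothetical; error handling omitted): select Toeplitz hashing
+ * with hash insertion, then program the hash key and indirection table using
+ * the EFSYS_OPT_RX_SCALE functions above.
+ *
+ *	(void) ef10_rx_scale_mode_set(enp, EFX_RX_HASHALG_TOEPLITZ,
+ *	    EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4, B_TRUE);
+ *	(void) ef10_rx_scale_key_set(enp, toeplitz_key, toeplitz_key_len);
+ *	(void) ef10_rx_scale_tbl_set(enp, indir_table, indir_table_len);
+ */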
+
+
+/*
+ * EF10 RX pseudo-header
+ * ---------------------
+ *
+ * Receive packets are prefixed by an (optional) 14-byte pseudo-header:
+ *
+ * +00: Toeplitz hash value.
+ * (32bit little-endian)
+ * +04: Outer VLAN tag. Zero if the packet did not have an outer VLAN tag.
+ * (16bit big-endian)
+ * +06: Inner VLAN tag. Zero if the packet did not have an inner VLAN tag.
+ * (16bit big-endian)
+ * +08: Packet Length. Zero if the RX datapath was in cut-through mode.
+ * (16bit little-endian)
+ * +10: MAC timestamp. Zero if timestamping is not enabled.
+ * (32bit little-endian)
+ *
+ * See "The RX Pseudo-header" in SF-109306-TC.
+ */
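+
+/*
+ * For illustration: no helper below extracts the VLAN tags, but a consumer of
+ * the pseudo-header described above could do so by combining the big-endian
+ * bytes MSB-first, e.g.
+ *
+ *	outer_vlan = (uint16_t)((buffer[4] << 8) | buffer[5]);
+ *	inner_vlan = (uint16_t)((buffer[6] << 8) | buffer[7]);
+ *
+ * A zero value indicates that the corresponding tag was not present.
+ */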
+
+ __checkReturn efx_rc_t
+ef10_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ /*
+ * The RX pseudo-header contains the packet length, excluding the
+ * pseudo-header. If the hardware receive datapath was operating in
+ * cut-through mode then the length in the RX pseudo-header will be
+ * zero, and the packet length must be obtained from the DMA length
+ * reported in the RX event.
+ */
+ *lengthp = buffer[8] | (buffer[9] << 8);
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+ef10_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return (buffer[0] |
+ (buffer[1] << 8) |
+ (buffer[2] << 16) |
+ (buffer[3] << 24));
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+ef10_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ ESF_DZ_RX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ ESF_DZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+ void
+ef10_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_dword_t dword;
+
+	/* Hardware has an alignment restriction for WPTR */
+ wptr = P2ALIGN(added, EF10_RX_WPTR_ALIGN);
+ if (pushed == wptr)
+ return;
+
+ *pushedp = wptr;
+
+ /* Push the populated descriptors out */
+ wptr &= erp->er_mask;
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR, wptr);
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+ef10_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_dword_t dword;
+ efx_evq_rxq_state_t *rxq_state =
+ &erp->er_eep->ee_rxq_state[erp->er_label];
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ if (rxq_state->eers_rx_packed_stream_credits == 0)
+ return;
+
+ EFX_POPULATE_DWORD_3(dword,
+ ERF_DZ_RX_DESC_MAGIC_DOORBELL, 1,
+ ERF_DZ_RX_DESC_MAGIC_CMD,
+ ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS,
+ ERF_DZ_RX_DESC_MAGIC_DATA,
+ rxq_state->eers_rx_packed_stream_credits);
+ EFX_BAR_TBL_WRITED(enp, ER_DZ_RX_DESC_UPD_REG,
+ erp->er_index, &dword, B_FALSE);
+
+ rxq_state->eers_rx_packed_stream_credits = 0;
+}
+
+ __checkReturn uint8_t *
+ef10_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ uint16_t buf_len;
+ uint8_t *pkt_start;
+ efx_qword_t *qwordp;
+ efx_evq_rxq_state_t *rxq_state =
+ &erp->er_eep->ee_rxq_state[erp->er_label];
+
+ EFSYS_ASSERT(rxq_state->eers_rx_packed_stream);
+
+ buffer += current_offset;
+ pkt_start = buffer + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE;
+
+ qwordp = (efx_qword_t *)buffer;
+ *timestamp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_TSTAMP);
+ *lengthp = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_ORIG_LEN);
+ buf_len = EFX_QWORD_FIELD(*qwordp, ES_DZ_PS_RX_PREFIX_CAP_LEN);
+
+ buf_len = P2ROUNDUP(buf_len + EFX_RX_PACKED_STREAM_RX_PREFIX_SIZE,
+ EFX_RX_PACKED_STREAM_ALIGNMENT);
+ *next_offsetp =
+ current_offset + buf_len + EFX_RX_PACKED_STREAM_ALIGNMENT;
+
+ EFSYS_ASSERT3U(*next_offsetp, <=, buffer_length);
+ EFSYS_ASSERT3U(current_offset + *lengthp, <, *next_offsetp);
+
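+	/*
+	 * If the space consumed by this packet crosses into the next
+	 * EFX_RX_PACKED_STREAM_MEM_PER_CREDIT chunk, accumulate a credit;
+	 * ef10_rx_qps_update_credits() later returns the accumulated
+	 * credits to the hardware.
+	 */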
+ if ((*next_offsetp ^ current_offset) &
+ EFX_RX_PACKED_STREAM_MEM_PER_CREDIT) {
+ if (rxq_state->eers_rx_packed_stream_credits <
+ EFX_RX_PACKED_STREAM_MAX_CREDITS)
+ rxq_state->eers_rx_packed_stream_credits++;
+ }
+
+ return (pkt_start);
+}
+
+
+#endif
+
+ __checkReturn efx_rc_t
+ef10_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_rxq(enp, erp->er_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(erp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+ boolean_t disable_scatter;
+ unsigned int ps_buf_size;
+
+ _NOTE(ARGUNUSED(id, erp))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS == (1 << ESF_DZ_RX_QLABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ case EFX_RXQ_TYPE_SCATTER:
+ ps_buf_size = 0;
+ break;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ case EFX_RXQ_TYPE_PACKED_STREAM_1M:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_512K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_256K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_128K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K;
+ break;
+ case EFX_RXQ_TYPE_PACKED_STREAM_64K:
+ ps_buf_size = MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K;
+ break;
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+ default:
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+ if (ps_buf_size != 0) {
+ /* Check if datapath firmware supports packed stream mode */
+ if (encp->enc_rx_packed_stream_supported == B_FALSE) {
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /* Check if packed stream allows configurable buffer sizes */
+ if ((type != EFX_RXQ_TYPE_PACKED_STREAM_1M) &&
+ (encp->enc_rx_var_packed_stream_supported == B_FALSE)) {
+ rc = ENOTSUP;
+ goto fail5;
+ }
+ }
+#else /* EFSYS_OPT_RX_PACKED_STREAM */
+ EFSYS_ASSERT(ps_buf_size == 0);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ /* Scatter can only be disabled if the firmware supports doing so */
+ if (type == EFX_RXQ_TYPE_SCATTER)
+ disable_scatter = B_FALSE;
+ else
+ disable_scatter = encp->enc_rx_disable_scatter_supported;
+
+ if ((rc = efx_mcdi_init_rxq(enp, n, eep->ee_index, label, index,
+ esmp, disable_scatter, ps_buf_size)) != 0)
+ goto fail6;
+
+ erp->er_eep = eep;
+ erp->er_label = label;
+
+ ef10_ev_rxlabel_init(eep, erp, label, ps_buf_size != 0);
+
+ return (0);
+
+fail6:
+ EFSYS_PROBE(fail6);
+#if EFSYS_OPT_RX_PACKED_STREAM
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_evq_t *eep = erp->er_eep;
+ unsigned int label = erp->er_label;
+
+ ef10_ev_rxlabel_fini(eep, label);
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+ void
+ef10_rx_fini(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ (void) efx_mcdi_rss_context_free(enp, enp->en_rss_context);
+ }
+ enp->en_rss_context = 0;
+ enp->en_rss_support = EFX_RX_SCALE_UNAVAILABLE;
+#else
+ _NOTE(ARGUNUSED(enp))
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_tlv_layout.h b/drivers/net/sfc/base/ef10_tlv_layout.h
new file mode 100644
index 00000000..7d099b81
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_tlv_layout.h
@@ -0,0 +1,941 @@
+/**************************************************************************\
+*//*! \file
+** <L5_PRIVATE L5_SOURCE>
+** \author mjs
+** \brief TLV item layouts for EF10 static and dynamic config in NVRAM
+** \date 2012/11/20
+** \cop (c) Solarflare Communications Inc.
+** </L5_PRIVATE>
+*//*
+\**************************************************************************/
+
+/* These structures define the layouts for the TLV items stored in static and
+ * dynamic configuration partitions in NVRAM for EF10 (Huntington etc.).
+ *
+ * They contain the same sort of information that was kept in the
+ * siena_mc_static_config_hdr_t and siena_mc_dynamic_config_hdr_t structures
+ * (defined in <ci/mgmt/mc_flash_layout.h> and <ci/mgmt/mc_dynamic_cfg.h>) for
+ * Siena.
+ *
+ * These are used directly by the MC and should also be usable directly on host
+ * systems which are little-endian and do not do strange things with structure
+ * padding. (Big-endian host systems will require some byte-swapping.)
+ *
+ * -----
+ *
+ * Please refer to SF-108797-SW for a general overview of the TLV partition
+ * format.
+ *
+ * -----
+ *
+ * The current tag IDs have a general structure: with the exception of the
+ * special values defined in the document, they are of the form 0xLTTTNNNN,
+ * where:
+ *
+ * - L is a location, indicating where this tag is expected to be found:
+ * 0: static configuration
+ * 1: dynamic configuration
+ * 2: firmware internal use
+ * 3: license partition
+ *
+ * - TTT is a type, which is just a unique value. The same type value
+ * might appear in both locations, indicating a relationship between
+ * the items (e.g. static and dynamic VPD below).
+ *
+ * - NNNN is an index of some form. Some item types are per-port, some
+ * are per-PF, some are per-partition-type.
+ *
+ * -----
+ *
+ * As with the previous Siena structures, each structure here is laid out
+ * carefully: values are aligned to their natural boundary, with explicit
+ * padding fields added where necessary. (No, technically this does not
+ * absolutely guarantee portability. But, in practice, compilers are generally
+ * sensible enough not to introduce completely pointless padding, and it works
+ * well enough.)
+ */
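+
+/* For example, under this scheme the dynamic per-PF VPD tag for PF 3,
+ * TLV_TAG_PF_DYNAMIC_VPD(3) = 0x10030003, decomposes as L = 1 (dynamic
+ * configuration), TTT = 0x003 (the VPD type, shared with the static per-PF
+ * VPD tags at 0x0003NNNN) and NNNN = 0x0003 (the PF index).
+ */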
+
+
+#ifndef CI_MGMT_TLV_LAYOUT_H
+#define CI_MGMT_TLV_LAYOUT_H
+
+
+/* ----------------------------------------------------------------------------
+ * General structure (defined by SF-108797-SW)
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* The "end" tag.
+ *
+ * (Note that this is *not* followed by length or value fields: anything after
+ * the tag itself is irrelevant.)
+ */
+
+#define TLV_TAG_END (0xEEEEEEEE)
+
+
+/* Other special reserved tag values.
+ */
+
+#define TLV_TAG_SKIP (0x00000000)
+#define TLV_TAG_INVALID (0xFFFFFFFF)
+
+
+/* TLV partition header.
+ *
+ * In a TLV partition, this must be the first item in the sequence, at offset
+ * 0.
+ */
+
+#define TLV_TAG_PARTITION_HEADER (0xEF10DA7A)
+
+struct tlv_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+/* 0 indicates the default segment (always located at offset 0), while other values
+ * are for RFID-selectable presets that should immediately follow the default segment.
+ * The default segment may also have preset > 0, which means that it is a preset
+ * selected through an RFID command and copied by FW to the location at offset 0. */
+ uint16_t preset;
+ uint32_t generation;
+ uint32_t total_length;
+};
+
+
+/* TLV partition trailer.
+ *
+ * In a TLV partition, this must be the last item in the sequence, immediately
+ * preceding the TLV_TAG_END word.
+ */
+
+#define TLV_TAG_PARTITION_TRAILER (0xEF101A57)
+
+struct tlv_partition_trailer {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t generation;
+ uint32_t checksum;
+};
+
+
+/* Appendable TLV partition header.
+ *
+ * In an appendable TLV partition, this must be the first item in the sequence,
+ * at offset 0. (Note that, unlike the configuration partitions, there is no
+ * trailer before the TLV_TAG_END word.)
+ */
+
+#define TLV_TAG_APPENDABLE_PARTITION_HEADER (0xEF10ADA7)
+
+struct tlv_appendable_partition_header {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t type_id;
+ uint16_t reserved;
+};
+
+
+/* ----------------------------------------------------------------------------
+ * Configuration items
+ * ----------------------------------------------------------------------------
+ */
+
+
+/* NIC global capabilities.
+ */
+
+#define TLV_TAG_GLOBAL_CAPABILITIES (0x00010000)
+
+struct tlv_global_capabilities {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t flags;
+};
+
+
+/* Siena-style per-port MAC address allocation.
+ *
+ * There are <count> addresses, starting at <base_address> and incrementing
+ * by adding <stride> to the low-order byte(s).
+ *
+ * (See also TLV_TAG_GLOBAL_MAC for an alternative, specifying a global pool
+ * of contiguous MAC addresses for the firmware to allocate as it sees fit.)
+ */
+
+#define TLV_TAG_PORT_MAC(port) (0x00020000 + (port))
+
+struct tlv_port_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved;
+ uint16_t count;
+ uint16_t stride;
+};
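+
+/* Worked example (values are arbitrary): a base_address ending in ...:10 with
+ * count = 4 and stride = 2 describes addresses ending in ...:10, ...:12,
+ * ...:14 and ...:16, i.e. the n-th address is the base plus n * stride
+ * applied to the low-order byte(s).
+ */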
+
+
+/* Static VPD.
+ *
+ * This is the portion of VPD which is set at manufacturing time and not
+ * expected to change. It is formatted as a standard PCI VPD block. There are
+ * global and per-pf TLVs for this, the global TLV is new for Medford and is
+ * used in preference to the per-pf TLV.
+ */
+
+#define TLV_TAG_PF_STATIC_VPD(pf) (0x00030000 + (pf))
+
+struct tlv_pf_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_STATIC_VPD (0x001f0000)
+
+struct tlv_global_static_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* Dynamic VPD.
+ *
+ * This is the portion of VPD which may be changed (e.g. by firmware updates).
+ * It is formatted as a standard PCI VPD block. There are global and per-pf TLVs
+ * for this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DYNAMIC_VPD(pf) (0x10030000 + (pf))
+
+struct tlv_pf_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+#define TLV_TAG_GLOBAL_DYNAMIC_VPD (0x10200000)
+
+struct tlv_global_dynamic_vpd {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+};
+
+
+/* "DBI" PCI config space changes.
+ *
+ * This is a set of edits made to the default PCI config space values before
+ * the device is allowed to enumerate. There are global and per-pf TLVs for
+ * this, the global TLV is new for Medford and is used in preference to the
+ * per-pf TLV.
+ */
+
+#define TLV_TAG_PF_DBI(pf) (0x00040000 + (pf))
+
+struct tlv_pf_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+#define TLV_TAG_GLOBAL_DBI (0x00210000)
+
+struct tlv_global_dbi {
+ uint32_t tag;
+ uint32_t length;
+ struct {
+ uint16_t addr;
+ uint16_t byte_enables;
+ uint32_t value;
+ } items[];
+};
+
+
+/* Partition subtype codes.
+ *
+ * A subtype may optionally be stored for each type of partition present in
+ * the NVRAM. For example, this may be used to allow a generic firmware update
+ * utility to select a specific variant of firmware for a specific variant of
+ * board.
+ *
+ * The description[] field is an optional string which is returned in the
+ * MC_CMD_NVRAM_METADATA response if present.
+ */
+
+#define TLV_TAG_PARTITION_SUBTYPE(type) (0x00050000 + (type))
+
+struct tlv_partition_subtype {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t subtype;
+ uint8_t description[];
+};
+
+
+/* Partition version codes.
+ *
+ * A version may optionally be stored for each type of partition present in
+ * the NVRAM. This provides a standard way of tracking the currently stored
+ * version of each of the various component images.
+ */
+
+#define TLV_TAG_PARTITION_VERSION(type) (0x10060000 + (type))
+
+struct tlv_partition_version {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t version_w;
+ uint16_t version_x;
+ uint16_t version_y;
+ uint16_t version_z;
+};
+
+/* Global PCIe configuration */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG (0x10070000)
+
+struct tlv_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ int16_t max_pf_number; /**< Largest PF RID (lower PFs may be hidden) */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+#define TLV_MAX_PF_DEFAULT (-1) /* Use FW default for largest PF RID */
+#define TLV_APER_DEFAULT (0xFFFF) /* Use FW default for a given aperture */
+};
+
+/* Per-PF configuration. Note that not all these fields are necessarily useful,
+ * as the apertures are constrained by the BIU settings (the one case we do
+ * use is to make BAR2 bigger than the BIU expects, in order to reserve space),
+ * but we can tidy things up later. */
+
+#define TLV_TAG_PF_PCIE_CONFIG(pf) (0x10080000 + (pf))
+
+struct tlv_per_pf_pcie_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t vfs_total;
+ uint8_t port_allocation;
+ uint16_t vectors_per_pf;
+ uint16_t vectors_per_vf;
+ uint8_t pf_bar0_aperture;
+ uint8_t pf_bar2_aperture;
+ uint8_t vf_bar0_aperture;
+ uint8_t vf_base;
+ uint16_t supp_pagesz;
+ uint16_t msix_vec_base;
+};
+
+
+/* Development ONLY. This is a single TLV tag for all the gubbins
+ * that can be set through the MC command-line other than the PCIe
+ * settings. This is a temporary measure. */
+#define TLV_TAG_TMP_GUBBINS (0x10090000) /* legacy symbol - do not use */
+#define TLV_TAG_TMP_GUBBINS_HUNT TLV_TAG_TMP_GUBBINS
+
+struct tlv_tmp_gubbins {
+ uint32_t tag;
+ uint32_t length;
+ /* Consumed by dpcpu.c */
+ uint64_t tx0_tags; /* Bitmap */
+ uint64_t tx1_tags; /* Bitmap */
+ uint64_t dl_tags; /* Bitmap */
+ uint32_t flags;
+#define TLV_DPCPU_TX_STRIPE (1) /* No longer used, has no effect */
+#define TLV_DPCPU_BIU_TAGS (2) /* Use BIU tag manager */
+#define TLV_DPCPU_TX0_TAGS (4) /* tx0_tags is valid */
+#define TLV_DPCPU_TX1_TAGS (8) /* tx1_tags is valid */
+#define TLV_DPCPU_DL_TAGS (16) /* dl_tags is valid */
+ /* Consumed by features.c */
+ uint32_t dut_features; /* All 1s -> leave alone */
+ int8_t with_rmon; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* Consumed by clocks_hunt.c */
+ int8_t clk_mode; /* 0 -> off, 1 -> on, -1 -> leave alone */
+ /* No longer used, superseded by TLV_TAG_DESCRIPTOR_CACHE_CONFIG. */
+ int8_t rx_dc_size; /* -1 -> leave alone */
+ int8_t tx_dc_size;
+ int16_t num_q_allocs;
+};
+
+/* Global port configuration
+ *
+ * This is now deprecated in favour of a platform-provided default
+ * and dynamic config override via tlv_global_port_options.
+ */
+#define TLV_TAG_GLOBAL_PORT_CONFIG (0x000a0000)
+
+struct tlv_global_port_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t ports_per_core;
+ uint32_t max_port_speed;
+};
+
+
+/* Firmware options.
+ *
+ * This is intended for user-configurable selection of optional firmware
+ * features and variants.
+ *
+ * Initially, this consists only of the satellite CPU firmware variant
+ * selection, but this tag could be extended in the future (using the
+ * tag length to determine whether additional fields are present).
+ */
+
+#define TLV_TAG_FIRMWARE_OPTIONS (0x100b0000)
+
+struct tlv_firmware_options {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t firmware_variant;
+#define TLV_FIRMWARE_VARIANT_DRIVER_SELECTED (0xffffffff)
+
+/* These are the values for overriding the driver's choice; the definitions
+ * are taken from MCDI so that they don't get out of step. Include
+ * <ci/mgmt/mc_driver_pcol.h> or the equivalent from your driver's tree if
+ * you need to use these constants.
+ */
+#define TLV_FIRMWARE_VARIANT_FULL_FEATURED MC_CMD_FW_FULL_FEATURED
+#define TLV_FIRMWARE_VARIANT_LOW_LATENCY MC_CMD_FW_LOW_LATENCY
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM MC_CMD_FW_PACKED_STREAM
+#define TLV_FIRMWARE_VARIANT_HIGH_TX_RATE MC_CMD_FW_HIGH_TX_RATE
+#define TLV_FIRMWARE_VARIANT_PACKED_STREAM_HASH_MODE_1 \
+ MC_CMD_FW_PACKED_STREAM_HASH_MODE_1
+#define TLV_FIRMWARE_VARIANT_RULES_ENGINE MC_CMD_FW_RULES_ENGINE
+};
+
+/* Voltage settings
+ *
+ * Intended for boards with A0 silicon where the core voltage may
+ * need tweaking. Most likely set once when the pass voltage is
+ * determined. */
+
+#define TLV_TAG_0V9_SETTINGS (0x000c0000)
+
+struct tlv_0v9_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t flags; /* Boards with high 0v9 settings may need active cooling */
+#define TLV_TAG_0V9_REQUIRES_FAN (1)
+ uint16_t target_voltage; /* In millivolts */
+ /* Since the limits are meant to be centred to the target (and must at least
+ * contain it) they need setting as well. */
+ uint16_t warn_low; /* In millivolts */
+ uint16_t warn_high; /* In millivolts */
+ uint16_t panic_low; /* In millivolts */
+ uint16_t panic_high; /* In millivolts */
+};
+
+
+/* Clock configuration */
+
+#define TLV_TAG_CLOCK_CONFIG (0x000d0000) /* legacy symbol - do not use */
+#define TLV_TAG_CLOCK_CONFIG_HUNT TLV_TAG_CLOCK_CONFIG
+
+struct tlv_clock_config {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_icore; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+#define TLV_TAG_CLOCK_CONFIG_MEDFORD (0x00100000)
+
+struct tlv_clock_config_medford {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t clk_sys; /* MHz */
+ uint16_t clk_mc; /* MHz */
+ uint16_t clk_rmon; /* MHz */
+ uint16_t clk_vswitch; /* MHz */
+ uint16_t clk_dpcpu; /* MHz */
+ uint16_t clk_pcs; /* MHz */
+};
+
+
+/* EF10-style global pool of MAC addresses.
+ *
+ * There are <count> addresses, starting at <base_address>, which are
+ * contiguous. Firmware is responsible for allocating addresses from this
+ * pool to ports / PFs as appropriate.
+ */
+
+#define TLV_TAG_GLOBAL_MAC (0x000e0000)
+
+struct tlv_global_mac {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t base_address[6];
+ uint16_t reserved1;
+ uint16_t count;
+ uint16_t reserved2;
+};
+
+#define TLV_TAG_ATB_0V9_TARGET (0x000f0000) /* legacy symbol - do not use */
+#define TLV_TAG_ATB_0V9_TARGET_HUNT TLV_TAG_ATB_0V9_TARGET
+
+/* The target value for the 0v9 power rail measured on-chip at the
+ * analogue test bus */
+struct tlv_0v9_atb_target {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t millivolts;
+ uint16_t reserved;
+};
+
+/* Factory settings for amplitude calibration of the PCIE TX serdes */
+#define TLV_TAG_TX_PCIE_AMP_CONFIG (0x00220000)
+struct tlv_pcie_tx_amp_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t quad_tx_imp2k[4];
+ uint8_t quad_tx_imp50[4];
+ uint8_t lane_amp[16];
+};
+
+
+/* Global PCIe configuration, second revision. This represents the visible PFs
+ * as a bitmap rather than as the number of the highest visible one. As such it
+ * can (for a 16-PF chip) represent a superset of what TLV_TAG_GLOBAL_PCIE_CONFIG
+ * can, and it should be used in place of that tag in future (compatibility with
+ * the old tag will be kept in the firmware indefinitely). */
+
+#define TLV_TAG_GLOBAL_PCIE_CONFIG_R2 (0x10100000)
+
+struct tlv_pcie_config_r2 {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t visible_pfs; /**< Bitmap of visible PFs */
+ uint16_t pf_aper; /**< BIU aperture for PF BAR2 */
+ uint16_t vf_aper; /**< BIU aperture for VF BAR0 */
+ uint16_t int_aper; /**< BIU aperture for PF BAR4 and VF BAR2 */
+};
+
+/* Dynamic port mode.
+ *
+ * Allows selecting alternate port configuration for platforms that support it
+ * (e.g. 1x40G vs 2x10G on Milano, 1x40G vs 4x10G on Medford). This affects the
+ * number of externally visible ports (and, hence, PF to port mapping), so must
+ * be done at boot time.
+ *
+ * This tag supersedes tlv_global_port_config.
+ */
+
+#define TLV_TAG_GLOBAL_PORT_MODE (0x10110000)
+
+struct tlv_global_port_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t port_mode;
+#define TLV_PORT_MODE_DEFAULT (0xffffffff) /* Default for given platform */
+#define TLV_PORT_MODE_10G (0) /* 10G, single SFP/10G-KR */
+#define TLV_PORT_MODE_40G (1) /* 40G, single QSFP/40G-KR */
+#define TLV_PORT_MODE_10G_10G (2) /* 2x10G, dual SFP/10G-KR or single QSFP */
+#define TLV_PORT_MODE_40G_40G (3) /* 40G + 40G, dual QSFP/40G-KR (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G (4) /* 2x10G + 2x10G, quad SFP/10G-KR or dual QSFP (Greenport) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1 (4) /* 4x10G, single QSFP, cage 0 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q (5) /* 4x10G, single QSFP, cage 0 (Medford) OBSOLETE DO NOT USE */
+#define TLV_PORT_MODE_40G_10G_10G (6) /* 1x40G + 2x10G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_40G (7) /* 2x10G + 1x40G, dual QSFP (Greenport, Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q2 (8) /* 4x10G, single QSFP, cage 1 (Medford) */
+#define TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2 (9) /* 2x10G + 2x10G, dual QSFP (Medford) */
+#define TLV_PORT_MODE_MAX TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2
+};
+
+/* Type of the v-switch created implicitly by the firmware */
+
+#define TLV_TAG_VSWITCH_TYPE(port) (0x10120000 + (port))
+
+struct tlv_vswitch_type {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vswitch_type;
+#define TLV_VSWITCH_TYPE_DEFAULT (0xffffffff) /* Firmware default; equivalent to no TLV present for a given port */
+#define TLV_VSWITCH_TYPE_NONE (0)
+#define TLV_VSWITCH_TYPE_VLAN (1)
+#define TLV_VSWITCH_TYPE_VEB (2)
+#define TLV_VSWITCH_TYPE_VEPA (3)
+#define TLV_VSWITCH_TYPE_MUX (4)
+#define TLV_VSWITCH_TYPE_TEST (5)
+};
+
+/* A VLAN tag for the v-port created implicitly by the firmware */
+
+#define TLV_TAG_VPORT_VLAN_TAG(pf) (0x10130000 + (pf))
+
+struct tlv_vport_vlan_tag {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t vlan_tag;
+#define TLV_VPORT_NO_VLAN_TAG (0xFFFFFFFF) /* Default in the absence of TLV for a given PF */
+};
+
+/* Offset to be applied to the 0v9 setting, wherever it came from */
+
+#define TLV_TAG_ATB_0V9_OFFSET (0x10140000)
+
+struct tlv_0v9_atb_offset {
+ uint32_t tag;
+ uint32_t length;
+ int16_t offset_millivolts;
+ uint16_t reserved;
+};
+
+/* A privilege mask given on reset to all non-admin PCIe functions (that is other than first-PF-per-port).
+ * The meaning of particular bits is defined in mcdi_ef10.yml under MC_CMD_PRIVILEGE_MASK, see also bug 44583.
+ * TLV_TAG_PRIVILEGE_MASK_ADD specifies bits that should be added (ORed) to firmware default while
+ * TLV_TAG_PRIVILEGE_MASK_REM specifies bits that should be removed (ANDed) from firmware default:
+ * Initial_privilege_mask = (firmware_default_mask | privilege_mask_add) & ~privilege_mask_rem */
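+
+/* Worked example with arbitrary mask values: if firmware_default_mask is 0x13,
+ * privilege_mask_add is 0x04 and privilege_mask_rem is 0x02, the initial mask
+ * is (0x13 | 0x04) & ~0x02 = 0x17 & ~0x02 = 0x15.
+ */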
+
+#define TLV_TAG_PRIVILEGE_MASK (0x10150000) /* legacy symbol - do not use */
+
+struct tlv_privilege_mask { /* legacy structure - do not use */
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD (0x10150000)
+
+struct tlv_privilege_mask_add {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+#define TLV_TAG_PRIVILEGE_MASK_REM (0x10160000)
+
+struct tlv_privilege_mask_rem {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_rem;
+};
+
+/* Additional privileges given to all PFs.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_ALL_PFS (0x10190000)
+
+struct tlv_privilege_mask_add_all_pfs {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Additional privileges given to a selected PF.
+ * This tag takes precedence over TLV_TAG_PRIVILEGE_MASK_REM. */
+
+#define TLV_TAG_PRIVILEGE_MASK_ADD_SINGLE_PF(pf) (0x101A0000 + (pf))
+
+struct tlv_privilege_mask_add_single_pf {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t privilege_mask_add;
+};
+
+/* Turning on/off the PFIOV mode.
+ * This tag only takes effect if TLV_TAG_VSWITCH_TYPE is missing or set to DEFAULT. */
+
+#define TLV_TAG_PFIOV(port) (0x10170000 + (port))
+
+struct tlv_pfiov {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t pfiov;
+#define TLV_PFIOV_OFF (0) /* Default */
+#define TLV_PFIOV_ON (1)
+};
+
+/* Multicast filter chaining mode selection.
+ *
+ * When enabled, multicast packets are delivered to all recipients of all
+ * matching multicast filters, with the exception that IP multicast filters
+ * will steal traffic from MAC multicast filters on a per-function basis.
+ * (New behaviour.)
+ *
+ * When disabled, multicast packets will always be delivered only to the
+ * recipients of the highest priority matching multicast filter.
+ * (Legacy behaviour.)
+ *
+ * The DEFAULT mode (which is the same as the tag not being present at all)
+ * is equivalent to ENABLED in production builds, and DISABLED in eftest
+ * builds.
+ *
+ * This option is intended to provide run-time control over this feature
+ * while it is being stabilised and may be withdrawn at some point in the
+ * future; the new behaviour is intended to become the standard behaviour.
+ */
+
+#define TLV_TAG_MCAST_FILTER_CHAINING (0x10180000)
+
+struct tlv_mcast_filter_chaining {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_MCAST_FILTER_CHAINING_DEFAULT (0xffffffff)
+#define TLV_MCAST_FILTER_CHAINING_DISABLED (0)
+#define TLV_MCAST_FILTER_CHAINING_ENABLED (1)
+};
+
+/* Pacer rate limit per PF */
+#define TLV_TAG_RATE_LIMIT(pf) (0x101b0000 + (pf))
+
+struct tlv_rate_limit {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t rate_mbps;
+};
+
+/* OCSD Enable/Disable
+ *
+ * This setting allows OCSD to be disabled. This is a requirement for HP
+ * servers to support PCI passthrough for virtualization.
+ *
+ * The DEFAULT mode (which is the same as the tag not being present) is
+ * equivalent to ENABLED.
+ *
+ * This option is not used by the MCFW, and is entirely handled by the various
+ * drivers that support OCSD, by reading the setting before they attempt
+ * to enable OCSD.
+ *
+ * bit0: OCSD Disabled/Enabled
+ */
+
+#define TLV_TAG_OCSD (0x101C0000)
+
+struct tlv_ocsd {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+#define TLV_OCSD_DISABLED 0
+#define TLV_OCSD_ENABLED 1 /* Default */
+};
+
+/* Descriptor cache config.
+ *
+ * Sets the sizes of the TX and RX descriptor caches as a power of 2. It also
+ * sets the total number of VIs. When the number of VIs is reduced VIs are taken
+ * away from the highest numbered port first, so a vi_count of 1024 means 1024
+ * VIs on the first port and 0 on the second (on a Torino).
+ */
+
+#define TLV_TAG_DESCRIPTOR_CACHE_CONFIG (0x101d0000)
+
+struct tlv_descriptor_cache_config {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t rx_desc_cache_size;
+ uint8_t tx_desc_cache_size;
+ uint16_t vi_count;
+};
+#define TLV_DESC_CACHE_DEFAULT (0xff)
+#define TLV_VI_COUNT_DEFAULT (0xffff)
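+
+/* Illustrative reading (the exact encoding is an assumption, not stated by the
+ * comment above beyond "as a power of 2"): if the cache size fields hold log2
+ * of the descriptor count, then rx_desc_cache_size = 6 would describe a
+ * 64-descriptor RX cache; TLV_DESC_CACHE_DEFAULT and TLV_VI_COUNT_DEFAULT
+ * presumably leave the firmware defaults in place, by analogy with
+ * TLV_APER_DEFAULT above.
+ */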
+
+/* RX event merging config (read batching).
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins.
+ */
+
+#define TLV_TAG_RX_EVENT_MERGING_CONFIG (0x101e0000)
+
+struct tlv_rx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_RX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+};
+#define TLV_RX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_RX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_PCIE_LINK_SETTINGS (0x101f0000)
+struct tlv_pcie_link_settings {
+ uint32_t tag;
+ uint32_t length;
+ uint16_t gen; /* Target PCIe generation: 1, 2, 3 */
+ uint16_t width; /* Number of lanes */
+};
+
+/* TX event merging config.
+ *
+ * Sets the global maximum number of events for the merging bins, and the
+ * global timeout configuration for the bins, and the global timeout for
+ * empty queues.
+ */
+#define TLV_TAG_TX_EVENT_MERGING_CONFIG (0x10210000)
+struct tlv_tx_event_merging_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t max_events;
+#define TLV_TX_EVENT_MERGING_CONFIG_MAX_EVENTS_MAX ((1 << 4) - 1)
+ uint32_t timeout_ns;
+ uint32_t qempty_timeout_ns; /* Medford only */
+};
+#define TLV_TX_EVENT_MERGING_MAX_EVENTS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_TIMEOUT_NS_DEFAULT (0xffffffff)
+#define TLV_TX_EVENT_MERGING_QEMPTY_TIMEOUT_NS_DEFAULT (0xffffffff)
+
+#define TLV_TAG_LICENSE (0x30800000)
+
+typedef struct tlv_license {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t data[];
+} tlv_license_t;
+
+/* TSA NIC IP address configuration
+ *
+ * Sets the TSA NIC IP address either statically via a configuration tool, or
+ * dynamically via DHCP or snooping, based on the mode selection
+ * (0=Static, 1=DHCP, 2=Snoop).
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAN_CONFIG (0x10220000)
+
+#define TLV_TSAN_IP_MODE_STATIC (0)
+#define TLV_TSAN_IP_MODE_DHCP (1)
+#define TLV_TSAN_IP_MODE_SNOOP (2)
+typedef struct tlv_tsan_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t mode;
+ uint32_t ip;
+ uint32_t netmask;
+ uint32_t gateway;
+ uint32_t port;
+ uint32_t bind_retry; /* DEPRECATED */
+ uint32_t bind_bkout; /* DEPRECATED */
+} tlv_tsan_config_t;
+
+/* TSA Controller IP address configuration
+ *
+ * Sets the TSA Controller IP address statically via configuration tool
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_TSAC_CONFIG (0x10230000)
+
+#define TLV_MAX_TSACS (4)
+typedef struct tlv_tsac_config {
+ uint32_t tag;
+ uint32_t length;
+ uint32_t num_tsacs;
+ uint32_t ip[TLV_MAX_TSACS];
+ uint32_t port[TLV_MAX_TSACS];
+} tlv_tsac_config_t;
+
+/* Binding ticket
+ *
+ * Sets the TSA NIC binding ticket used for binding process between the TSA NIC
+ * and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_BINDING_TICKET (0x10240000)
+
+typedef struct tlv_binding_ticket {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_binding_ticket_t;
+
+/* Solarflare private key (DEPRECATED)
+ *
+ * Sets the Solarflare private key used for signing during the binding process
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_PIK_SF (0x10250000) /* DEPRECATED */
+
+typedef struct tlv_pik_sf {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_pik_sf_t;
+
+/* CA root certificate
+ *
+ * Sets the CA root certificate used for TSA Controller verification during
+ * TLS connection setup between the TSA NIC and the TSA Controller
+ *
+ * NOTE: This TAG is temporarily placed in the dynamic config partition and will
+ * be moved to a private partition during TSA development. It is not used in any
+ * released code yet.
+ */
+
+#define TLV_TAG_TMP_CA_ROOT_CERT (0x10260000)
+
+typedef struct tlv_ca_root_cert {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t bytes[];
+} tlv_ca_root_cert_t;
+
+/* Tx vFIFO Low latency configuration
+ *
+ * To keep the desired boot behaviour for the switch, it is only necessary to
+ * know whether the low latency mode is enabled.
+ */
+
+#define TLV_TAG_TX_VFIFO_ULL_MODE (0x10270000)
+struct tlv_tx_vfifo_ull_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_TX_VFIFO_ULL_MODE_DEFAULT 0
+};
+
+/* BIU mode
+ *
+ * Medford2 tag for selecting VI window decode (see values below)
+ */
+#define TLV_TAG_BIU_VI_WINDOW_MODE (0x10280000)
+struct tlv_biu_vi_window_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_BIU_VI_WINDOW_MODE_8K 0 /* 8k per VI, CTPIO not mapped, medford/hunt compatible */
+#define TLV_BIU_VI_WINDOW_MODE_16K 1 /* 16k per VI, CTPIO mapped */
+#define TLV_BIU_VI_WINDOW_MODE_64K 2 /* 64k per VI, CTPIO mapped, POWER-friendly */
+};
+
+/* FastPD mode
+ *
+ * Medford2 tag for configuring the FastPD mode (see values below)
+ */
+#define TLV_TAG_FASTPD_MODE(port) (0x10290000 + (port))
+struct tlv_fastpd_mode {
+ uint32_t tag;
+ uint32_t length;
+ uint8_t mode;
+#define TLV_FASTPD_MODE_SOFT_ALL 0 /* All packets to the SoftPD */
+#define TLV_FASTPD_MODE_FAST_ALL 1 /* All packets to the FastPD */
+#define TLV_FASTPD_MODE_FAST_SUPPORTED 2 /* Supported packet types to the FastPD; everything else to the SoftPD */
+};
+
+#endif /* CI_MGMT_TLV_LAYOUT_H */
diff --git a/drivers/net/sfc/base/ef10_tx.c b/drivers/net/sfc/base/ef10_tx.c
new file mode 100644
index 00000000..0f8e9b1b
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_tx.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+static __checkReturn efx_rc_t
+efx_mcdi_init_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t size,
+ __in uint32_t target_evq,
+ __in uint32_t label,
+ __in uint32_t instance,
+ __in uint16_t flags,
+ __in efsys_mem_t *esmp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_INIT_TXQ_IN_LEN(EFX_TXQ_MAX_BUFS),
+ MC_CMD_INIT_TXQ_OUT_LEN)];
+ efx_qword_t *dma_addr;
+ uint64_t addr;
+ int npages;
+ int i;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(EFX_TXQ_MAX_BUFS >=
+ EFX_TXQ_NBUFS(enp->en_nic_cfg.enc_txq_max_ndescs));
+
+ npages = EFX_TXQ_NBUFS(size);
+ if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_INIT_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, size);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance);
+
+ MCDI_IN_POPULATE_DWORD_7(req, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_BUFF_MODE, 0,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1,
+ INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0,
+ INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0,
+ INIT_TXQ_IN_CRC_MODE, 0,
+ INIT_TXQ_IN_FLAG_TIMESTAMP, 0);
+
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR);
+ addr = EFSYS_MEM_ADDR(esmp);
+
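+	/*
+	 * Populate the MCDI request with the 64-bit DMA address of each
+	 * EFX_BUF_SIZE chunk of the TXQ ring memory, split into low and
+	 * high dwords.
+	 */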
+ for (i = 0; i < npages; i++) {
+ EFX_POPULATE_QWORD_2(*dma_addr,
+ EFX_DWORD_1, (uint32_t)(addr >> 32),
+ EFX_DWORD_0, (uint32_t)(addr & 0xffffffff));
+
+ dma_addr++;
+ addr += EFX_BUF_SIZE;
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fini_txq(
+ __in efx_nic_t *enp,
+ __in uint32_t instance)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FINI_TXQ_IN_LEN,
+ MC_CMD_FINI_TXQ_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FINI_TXQ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if ((req.emr_rc != 0) && (req.emr_rc != MC_CMD_ERR_EALREADY)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_init(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+}
+
+ void
+ef10_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_qword_t desc;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(id))
+
+ if ((rc = efx_mcdi_init_txq(enp, n, eep->ee_index, label, index, flags,
+ esmp)) != 0)
+ goto fail1;
+
+ /*
+ * A previous user of this TX queue may have written a descriptor to the
+ * TX push collector, but not pushed the doorbell (e.g. after a crash).
+ * The next doorbell write would then push the stale descriptor.
+ *
+ * Ensure the (per network port) TX push collector is cleared by writing
+ * a no-op TX option descriptor. See bug29981 for details.
+ */
+ *addedp = 1;
+ EFX_POPULATE_QWORD_4(desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 1 : 0,
+ ESF_DZ_TX_OPTION_IP_CSUM,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 1 : 0);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, 0, &desc);
+ ef10_tx_qpush(etp, *addedp, 0);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_piobuf_handle_t handle;
+ efx_rc_t rc;
+
+ if (etp->et_pio_size != 0) {
+ rc = EALREADY;
+ goto fail1;
+ }
+
+ /* Sub-allocate a PIO block from a piobuf */
+ if ((rc = ef10_nic_pio_alloc(enp,
+ &etp->et_pio_bufnum,
+ &handle,
+ &etp->et_pio_blknum,
+ &etp->et_pio_offset,
+ &etp->et_pio_size)) != 0) {
+ goto fail2;
+ }
+ EFSYS_ASSERT3U(etp->et_pio_size, !=, 0);
+
+ /* Link the piobuf to this TXQ */
+ if ((rc = ef10_nic_pio_link(enp, etp->et_index, handle)) != 0) {
+ goto fail3;
+ }
+
+ /*
+ * et_pio_offset is the offset of the sub-allocated block within the
+ * hardware PIO buffer. It is used as the buffer address in the PIO
+ * option descriptor.
+ *
+ * et_pio_write_offset is the offset of the sub-allocated block from the
+ * start of the write-combined memory mapping, and is used for writing
+ * data into the PIO buffer.
+ */
+ etp->et_pio_write_offset =
+ (etp->et_pio_bufnum * ER_DZ_TX_PIOBUF_STEP) +
+ ER_DZ_TX_PIOBUF_OFST + etp->et_pio_offset;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+fail2:
+ EFSYS_PROBE(fail2);
+ etp->et_pio_size = 0;
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+
+ if (etp->et_pio_size != 0) {
+ /* Unlink the piobuf from this TXQ */
+ ef10_nic_pio_unlink(enp, etp->et_index);
+
+ /* Free the sub-allocated PIO block */
+ ef10_nic_pio_free(enp, etp->et_pio_bufnum, etp->et_pio_blknum);
+ etp->et_pio_size = 0;
+ etp->et_pio_write_offset = 0;
+ }
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(length) uint8_t *buffer,
+ __in size_t length,
+ __in size_t offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efsys_bar_t *esbp = enp->en_esbp;
+ uint32_t write_offset;
+ uint32_t write_offset_limit;
+ efx_qword_t *eqp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(length % sizeof (efx_qword_t) == 0);
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail1;
+ }
+ if (offset + length > etp->et_pio_size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ /*
+ * Writes to PIO buffers must be 64 bit aligned, and multiples of
+ * 64 bits.
+ */
+ write_offset = etp->et_pio_write_offset + offset;
+ write_offset_limit = write_offset + length;
+ eqp = (efx_qword_t *)buffer;
+ while (write_offset < write_offset_limit) {
+ EFSYS_BAR_WC_WRITEQ(esbp, write_offset, eqp);
+ eqp++;
+ write_offset += sizeof (efx_qword_t);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_qword_t pio_desc;
+ unsigned int id;
+ size_t offset;
+ unsigned int added = *addedp;
+ efx_rc_t rc;
+
+
+ if (added - completed + 1 > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (etp->et_pio_size == 0) {
+ rc = ENOENT;
+ goto fail2;
+ }
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE4(tx_pio_post, unsigned int, etp->et_index,
+ unsigned int, id, uint32_t, etp->et_pio_offset,
+ size_t, pkt_length);
+
+ EFX_POPULATE_QWORD_5(pio_desc,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE, 1,
+ ESF_DZ_TX_PIO_CONT, 0,
+ ESF_DZ_TX_PIO_BYTE_CNT, pkt_length,
+ ESF_DZ_TX_PIO_BUF_ADDR, etp->et_pio_offset);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &pio_desc);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST_PIO);
+
+ *addedp = added;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t addr = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ boolean_t eop = ebp->eb_eop;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t qword;
+
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <=
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_PROBE5(tx_post, unsigned int, etp->et_index,
+ unsigned int, id, efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(qword,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &qword);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * This improves performance by pushing a TX descriptor at the same time as the
+ * doorbell. The descriptor must also be added to the TXQ, so that it can be
+ * used if the hardware decides not to use the pushed descriptor.
+ */
+ void
+ef10_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ unsigned int wptr;
+ unsigned int id;
+ size_t offset;
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ wptr = added & etp->et_mask;
+ id = pushed & etp->et_mask;
+ offset = id * sizeof (efx_qword_t);
+
+ EFSYS_MEM_READQ(etp->et_esmp, offset, &desc);
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, wptr,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1, wptr, id);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG, etp->et_index,
+ &oword);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /* No limitations on boundary crossing */
+ EFSYS_ASSERT(size <= etp->et_enp->en_nic_cfg.enc_tx_dma_desc_size_max);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, (eop) ? 0 : 1,
+ ESF_DZ_TX_KER_BYTE_CNT, (uint32_t)(size),
+ ESF_DZ_TX_KER_BUF_ADDR_DW0, (uint32_t)(addr & 0xffffffff),
+ ESF_DZ_TX_KER_BUF_ADDR_DW1, (uint32_t)(addr >> 32));
+}
+
+ void
+ef10_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE4(tx_desc_tso_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint8_t, tcp_flags);
+
+ EFX_POPULATE_QWORD_5(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_TCP_FLAGS, tcp_flags,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+}
+
+ void
+ef10_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ EFSYS_PROBE4(tx_desc_tso2_create, unsigned int, etp->et_index,
+ uint16_t, ipv4_id, uint32_t, tcp_seq,
+ uint16_t, tcp_mss);
+
+ EFSYS_ASSERT(count >= EFX_TX_FATSOV2_OPT_NDESCS);
+
+ EFX_POPULATE_QWORD_5(edp[0].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
+ ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_TCP_SEQNO, tcp_seq);
+ EFX_POPULATE_QWORD_4(edp[1].ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_TSO,
+ ESF_DZ_TX_TSO_OPTION_TYPE,
+ ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_TCP_MSS, tcp_mss);
+}
+
+ void
+ef10_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ EFSYS_PROBE2(tx_desc_vlantci_create, unsigned int, etp->et_index,
+ uint16_t, tci);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ ESF_DZ_TX_DESC_IS_OPT, 1,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_VLAN,
+ ESF_DZ_TX_VLAN_OP, tci ? 1 : 0,
+ ESF_DZ_TX_VLAN_TAG1, tci);
+}
+
+
+ __checkReturn efx_rc_t
+ef10_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_rc_t rc;
+
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp, ns))
+ _NOTE(CONSTANTCONDITION)
+ if (B_FALSE) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ /* FIXME */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_fini_txq(enp, etp->et_index)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ /* FIXME */
+ _NOTE(ARGUNUSED(etp))
+ /* FIXME */
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+ef10_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/ef10_vpd.c b/drivers/net/sfc/base/ef10_vpd.c
new file mode 100644
index 00000000..71123a90
--- /dev/null
+++ b/drivers/net/sfc/base/ef10_vpd.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+#include "ef10_tlv_layout.h"
+
+ __checkReturn efx_rc_t
+ef10_vpd_init(
+ __in efx_nic_t *enp)
+{
+ caddr_t svpd;
+ size_t svpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_STATIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_STATIC_VPD(pci_pf);
+ }
+
+ /*
+ * The VPD interface exposes VPD resources from the combined static and
+ * dynamic VPD storage. As the static VPD configuration should *never*
+ * change, we can cache it.
+ */
+ svpd = NULL;
+ svpd_size = 0;
+ rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_STATIC_CONFIG,
+ tag, &svpd, &svpd_size);
+ if (rc != 0) {
+ if (rc == EACCES) {
+ /* Unprivileged functions cannot access VPD */
+ goto out;
+ }
+ goto fail1;
+ }
+
+ if (svpd != NULL && svpd_size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, svpd_size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_arch.ef10.ena_svpd = svpd;
+ enp->en_arch.ef10.ena_svpd_length = svpd_size;
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, svpd_size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd,
+ * which is the size of the DYNAMIC_CONFIG partition.
+ */
+ if ((rc = efx_mcdi_nvram_info(enp, NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ sizep, NULL, NULL, NULL)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ caddr_t dvpd;
+ size_t dvpd_size;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ if ((rc = ef10_nvram_partn_read_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, &dvpd, &dvpd_size)) != 0)
+ goto fail1;
+
+ if (dvpd_size > size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+ memcpy(data, dvpd, dvpd_size);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + dvpd_size, 0xff, size - dvpd_size);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dvpd_size, dvpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. ef10_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+ * Only create an ID string if the dynamic cfg doesn't have one
+ */
+ if (enp->en_arch.ef10.ena_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_arch.ef10.ena_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_arch.ef10.ena_svpd,
+ enp->en_arch.ef10.ena_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+ef10_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t vpd_length;
+ uint32_t pci_pf;
+ uint32_t tag;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_nic_cfg.enc_vpd_is_global) {
+ tag = TLV_TAG_GLOBAL_DYNAMIC_VPD;
+ } else {
+ pci_pf = enp->en_nic_cfg.enc_pf;
+ tag = TLV_TAG_PF_DYNAMIC_VPD(pci_pf);
+ }
+
+ /* Determine total length of new dynamic VPD */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Store new dynamic VPD in all segments in DYNAMIC_CONFIG partition */
+ if ((rc = ef10_nvram_partn_write_segment_tlv(enp,
+ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,
+ tag, data, vpd_length, B_TRUE)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+ef10_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD);
+
+ if (enp->en_arch.ef10.ena_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_arch.ef10.ena_svpd_length,
+ enp->en_arch.ef10.ena_svpd);
+
+ enp->en_arch.ef10.ena_svpd = NULL;
+ enp->en_arch.ef10.ena_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/efx.h b/drivers/net/sfc/base/efx.h
new file mode 100644
index 00000000..7eabc370
--- /dev/null
+++ b/drivers/net/sfc/base/efx.h
@@ -0,0 +1,2535 @@
+/*
+ * Copyright (c) 2006-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_H
+#define _SYS_EFX_H
+
+#include "efsys.h"
+#include "efx_check.h"
+#include "efx_phy_ids.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_STATIC_ASSERT(_cond) \
+ ((void)sizeof(char[(_cond) ? 1 : -1]))
+
+#define EFX_ARRAY_SIZE(_array) \
+ (sizeof(_array) / sizeof((_array)[0]))
+
+#define EFX_FIELD_OFFSET(_type, _field) \
+ ((size_t) &(((_type *)0)->_field))
+
+/* Return codes */
+
+typedef __success(return == 0) int efx_rc_t;
+
+
+/* Chip families */
+
+typedef enum efx_family_e {
+ EFX_FAMILY_INVALID,
+ EFX_FAMILY_FALCON, /* Obsolete and not supported */
+ EFX_FAMILY_SIENA,
+ EFX_FAMILY_HUNTINGTON,
+ EFX_FAMILY_MEDFORD,
+ EFX_FAMILY_NTYPES
+} efx_family_t;
+
+extern __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp);
+
+
+#define EFX_PCI_VENID_SFC 0x1924
+
+#define EFX_PCI_DEVID_FALCON 0x0710 /* SFC4000 */
+
+#define EFX_PCI_DEVID_BETHPAGE 0x0803 /* SFC9020 */
+#define EFX_PCI_DEVID_SIENA 0x0813 /* SFL9021 */
+#define EFX_PCI_DEVID_SIENA_F1_UNINIT 0x0810
+
+#define EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT 0x0901
+#define EFX_PCI_DEVID_FARMINGDALE 0x0903 /* SFC9120 PF */
+#define EFX_PCI_DEVID_GREENPORT 0x0923 /* SFC9140 PF */
+
+#define EFX_PCI_DEVID_FARMINGDALE_VF 0x1903 /* SFC9120 VF */
+#define EFX_PCI_DEVID_GREENPORT_VF 0x1923 /* SFC9140 VF */
+
+#define EFX_PCI_DEVID_MEDFORD_PF_UNINIT 0x0913
+#define EFX_PCI_DEVID_MEDFORD 0x0A03 /* SFC9240 PF */
+#define EFX_PCI_DEVID_MEDFORD_VF 0x1A03 /* SFC9240 VF */
+
+#define EFX_MEM_BAR 2
+
+/* Error codes */
+
+enum {
+ EFX_ERR_INVALID,
+ EFX_ERR_SRAM_OOB,
+ EFX_ERR_BUFID_DC_OOB,
+ EFX_ERR_MEM_PERR,
+ EFX_ERR_RBUF_OWN,
+ EFX_ERR_TBUF_OWN,
+ EFX_ERR_RDESQ_OWN,
+ EFX_ERR_TDESQ_OWN,
+ EFX_ERR_EVQ_OWN,
+ EFX_ERR_EVFF_OFLO,
+ EFX_ERR_ILL_ADDR,
+ EFX_ERR_SRAM_PERR,
+ EFX_ERR_NCODES
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+extern __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length);
+
+
+/* Type prototypes */
+
+typedef struct efx_rxq_s efx_rxq_t;
+
+/* NIC */
+
+typedef struct efx_nic_s efx_nic_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp);
+
+extern __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+efx_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_unprobe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_nic_destroy(
+ __in efx_nic_t *enp);
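+
+/*
+ * Illustrative bring-up/tear-down ordering sketch; this is an editorial
+ * example and not part of the original sources. 'venid', 'devid', 'esip',
+ * 'esbp' and 'eslp' are assumed to be provided by platform code.
+ *
+ *	efx_family_t family;
+ *	efx_nic_t *enp;
+ *
+ *	if (efx_family(venid, devid, &family) != 0)
+ *		return;
+ *	if (efx_nic_create(family, esip, esbp, eslp, &enp) != 0)
+ *		return;
+ *	if (efx_nic_probe(enp) == 0) {
+ *		if (efx_nic_init(enp) == 0) {
+ *			(use the NIC, then)
+ *			efx_nic_fini(enp);
+ *		}
+ *		efx_nic_unprobe(enp);
+ *	}
+ *	efx_nic_destroy(enp);
+ */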
+
+#define EFX_PCIE_LINK_SPEED_GEN1 1
+#define EFX_PCIE_LINK_SPEED_GEN2 2
+#define EFX_PCIE_LINK_SPEED_GEN3 3
+
+typedef enum efx_pcie_link_performance_e {
+ EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH,
+ EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY,
+ EFX_PCIE_LINK_PERFORMANCE_OPTIMAL
+} efx_pcie_link_performance_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp);
+
+extern __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp);
+
+#if EFSYS_OPT_MCDI
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/* Huntington and Medford require MCDIv2 commands */
+#define WITH_MCDI_V2 1
+#endif
+
+typedef struct efx_mcdi_req_s efx_mcdi_req_t;
+
+typedef enum efx_mcdi_exception_e {
+ EFX_MCDI_EXCEPTION_MC_REBOOT,
+ EFX_MCDI_EXCEPTION_MC_BADASSERT,
+} efx_mcdi_exception_t;
+
+#if EFSYS_OPT_MCDI_LOGGING
+typedef enum efx_log_msg_e {
+ EFX_LOG_INVALID,
+ EFX_LOG_MCDI_REQUEST,
+ EFX_LOG_MCDI_RESPONSE,
+} efx_log_msg_t;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+typedef struct efx_mcdi_transport_s {
+ void *emt_context;
+ efsys_mem_t *emt_dma_mem;
+ void (*emt_execute)(void *, efx_mcdi_req_t *);
+ void (*emt_ev_cpl)(void *);
+ void (*emt_exception)(void *, efx_mcdi_exception_t);
+#if EFSYS_OPT_MCDI_LOGGING
+ void (*emt_logger)(void *, efx_log_msg_t,
+ void *, size_t, void *, size_t);
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ void (*emt_ev_proxy_response)(void *, uint32_t, efx_rc_t);
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+} efx_mcdi_transport_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp);
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *usec_timeoutp);
+
+extern void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mcdi_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+/* INTR */
+
+#define EFX_NINTR_SIENA 1024
+
+typedef enum efx_intr_type_e {
+ EFX_INTR_INVALID = 0,
+ EFX_INTR_LINE,
+ EFX_INTR_MESSAGE,
+ EFX_INTR_NTYPES
+} efx_intr_type_t;
+
+#define EFX_INTR_SIZE (sizeof (efx_oword_t))
+
+extern __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+extern void
+efx_intr_enable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+#define EFX_INTR_NEVQS 32
+
+extern __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+extern void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *maskp);
+
+extern void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+extern void
+efx_intr_fatal(
+ __in efx_nic_t *enp);
+
+extern void
+efx_intr_fini(
+ __in efx_nic_t *enp);
+
+/* MAC */
+
+#if EFSYS_OPT_MAC_STATS
+
+/* START MKCONFIG GENERATED EfxHeaderMacBlock e323546097fd7c65 */
+typedef enum efx_mac_stat_e {
+ EFX_MAC_RX_OCTETS,
+ EFX_MAC_RX_PKTS,
+ EFX_MAC_RX_UNICST_PKTS,
+ EFX_MAC_RX_MULTICST_PKTS,
+ EFX_MAC_RX_BRDCST_PKTS,
+ EFX_MAC_RX_PAUSE_PKTS,
+ EFX_MAC_RX_LE_64_PKTS,
+ EFX_MAC_RX_65_TO_127_PKTS,
+ EFX_MAC_RX_128_TO_255_PKTS,
+ EFX_MAC_RX_256_TO_511_PKTS,
+ EFX_MAC_RX_512_TO_1023_PKTS,
+ EFX_MAC_RX_1024_TO_15XX_PKTS,
+ EFX_MAC_RX_GE_15XX_PKTS,
+ EFX_MAC_RX_ERRORS,
+ EFX_MAC_RX_FCS_ERRORS,
+ EFX_MAC_RX_DROP_EVENTS,
+ EFX_MAC_RX_FALSE_CARRIER_ERRORS,
+ EFX_MAC_RX_SYMBOL_ERRORS,
+ EFX_MAC_RX_ALIGN_ERRORS,
+ EFX_MAC_RX_INTERNAL_ERRORS,
+ EFX_MAC_RX_JABBER_PKTS,
+ EFX_MAC_RX_LANE0_CHAR_ERR,
+ EFX_MAC_RX_LANE1_CHAR_ERR,
+ EFX_MAC_RX_LANE2_CHAR_ERR,
+ EFX_MAC_RX_LANE3_CHAR_ERR,
+ EFX_MAC_RX_LANE0_DISP_ERR,
+ EFX_MAC_RX_LANE1_DISP_ERR,
+ EFX_MAC_RX_LANE2_DISP_ERR,
+ EFX_MAC_RX_LANE3_DISP_ERR,
+ EFX_MAC_RX_MATCH_FAULT,
+ EFX_MAC_RX_NODESC_DROP_CNT,
+ EFX_MAC_TX_OCTETS,
+ EFX_MAC_TX_PKTS,
+ EFX_MAC_TX_UNICST_PKTS,
+ EFX_MAC_TX_MULTICST_PKTS,
+ EFX_MAC_TX_BRDCST_PKTS,
+ EFX_MAC_TX_PAUSE_PKTS,
+ EFX_MAC_TX_LE_64_PKTS,
+ EFX_MAC_TX_65_TO_127_PKTS,
+ EFX_MAC_TX_128_TO_255_PKTS,
+ EFX_MAC_TX_256_TO_511_PKTS,
+ EFX_MAC_TX_512_TO_1023_PKTS,
+ EFX_MAC_TX_1024_TO_15XX_PKTS,
+ EFX_MAC_TX_GE_15XX_PKTS,
+ EFX_MAC_TX_ERRORS,
+ EFX_MAC_TX_SGL_COL_PKTS,
+ EFX_MAC_TX_MULT_COL_PKTS,
+ EFX_MAC_TX_EX_COL_PKTS,
+ EFX_MAC_TX_LATE_COL_PKTS,
+ EFX_MAC_TX_DEF_PKTS,
+ EFX_MAC_TX_EX_DEF_PKTS,
+ EFX_MAC_PM_TRUNC_BB_OVERFLOW,
+ EFX_MAC_PM_DISCARD_BB_OVERFLOW,
+ EFX_MAC_PM_TRUNC_VFIFO_FULL,
+ EFX_MAC_PM_DISCARD_VFIFO_FULL,
+ EFX_MAC_PM_TRUNC_QBB,
+ EFX_MAC_PM_DISCARD_QBB,
+ EFX_MAC_PM_DISCARD_MAPPING,
+ EFX_MAC_RXDP_Q_DISABLED_PKTS,
+ EFX_MAC_RXDP_DI_DROPPED_PKTS,
+ EFX_MAC_RXDP_STREAMING_PKTS,
+ EFX_MAC_RXDP_HLB_FETCH,
+ EFX_MAC_RXDP_HLB_WAIT,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_RX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_RX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_RX_BAD_BYTES,
+ EFX_MAC_VADAPTER_RX_OVERFLOW,
+ EFX_MAC_VADAPTER_TX_UNICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_UNICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_MULTICAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS,
+ EFX_MAC_VADAPTER_TX_BROADCAST_BYTES,
+ EFX_MAC_VADAPTER_TX_BAD_PACKETS,
+ EFX_MAC_VADAPTER_TX_BAD_BYTES,
+ EFX_MAC_VADAPTER_TX_OVERFLOW,
+ EFX_MAC_NSTATS
+} efx_mac_stat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderMacBlock */
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef enum efx_link_mode_e {
+ EFX_LINK_UNKNOWN = 0,
+ EFX_LINK_DOWN,
+ EFX_LINK_10HDX,
+ EFX_LINK_10FDX,
+ EFX_LINK_100HDX,
+ EFX_LINK_100FDX,
+ EFX_LINK_1000HDX,
+ EFX_LINK_1000FDX,
+ EFX_LINK_10000FDX,
+ EFX_LINK_40000FDX,
+ EFX_LINK_NMODES
+} efx_link_mode_t;
+
+#define EFX_MAC_ADDR_LEN 6
+
+#define EFX_MAC_ADDR_IS_MULTICAST(_address) (((uint8_t *)_address)[0] & 0x01)
+
+#define EFX_MAC_MULTICAST_LIST_MAX 256
+
+#define EFX_MAC_SDU_MAX 9202
+
+#define EFX_MAC_PDU_ADJUSTMENT \
+ (/* EtherII */ 14 \
+ + /* VLAN */ 4 \
+ + /* CRC */ 4 \
+ + /* bug16011 */ 16)
+
+#define EFX_MAC_PDU(_sdu) \
+ P2ROUNDUP((_sdu) + EFX_MAC_PDU_ADJUSTMENT, 8)
+
+/*
+ * Due to the P2ROUNDUP in EFX_MAC_PDU(), EFX_MAC_SDU_FROM_PDU() may give
+ * the SDU rounded up slightly.
+ */
+#define EFX_MAC_SDU_FROM_PDU(_pdu) ((_pdu) - EFX_MAC_PDU_ADJUSTMENT)
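+
+/*
+ * Worked example (editorial note, not part of the original sources):
+ * the adjustment above is 14 + 4 + 4 + 16 = 38 bytes, so for a standard
+ * 1500 byte SDU, EFX_MAC_PDU(1500) = P2ROUNDUP(1538, 8) = 1544 and
+ * EFX_MAC_SDU_FROM_PDU(1544) = 1506, slightly larger than the original
+ * SDU because of the rounding.
+ */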
+
+#define EFX_MAC_PDU_MIN 60
+#define EFX_MAC_PDU_MAX EFX_MAC_PDU(EFX_MAC_SDU_MAX)
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu);
+
+extern __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst);
+
+extern __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count);
+
+extern __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss);
+
+extern void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled);
+
+extern __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+#define EFX_FCNTL_RESPOND 0x00000001
+#define EFX_FCNTL_GENERATE 0x00000002
+
+extern __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg);
+
+extern void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp);
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_MAC_STATS_MASK_BITS_PER_PAGE (8 * sizeof (uint32_t))
+
+#define EFX_MAC_STATS_MASK_NPAGES \
+ (P2ROUNDUP(EFX_MAC_NSTATS, EFX_MAC_STATS_MASK_BITS_PER_PAGE) / \
+ EFX_MAC_STATS_MASK_BITS_PER_PAGE)
+
+/*
+ * Get mask of MAC statistics supported by the hardware.
+ *
+ * If mask_size is insufficient to hold the mask, EINVAL is returned.
+ * A buffer of EFX_MAC_STATS_MASK_NPAGES pages, each of sizeof (uint32_t)
+ * bytes, is sufficient.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+#define EFX_MAC_STAT_SUPPORTED(_mask, _stat) \
+ ((_mask)[(_stat) / EFX_MAC_STATS_MASK_BITS_PER_PAGE] & \
+ (1ULL << ((_stat) & (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1))))
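+
+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * sources): check that a statistic is supported before relying on it.
+ *
+ *	uint32_t mask[EFX_MAC_STATS_MASK_NPAGES];
+ *
+ *	if (efx_mac_stats_get_mask(enp, mask, sizeof (mask)) == 0 &&
+ *	    EFX_MAC_STAT_SUPPORTED(mask, EFX_MAC_RX_PKTS))
+ *		(EFX_MAC_RX_PKTS can be read from the uploaded statistics)
+ */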
+
+#define EFX_MAC_STATS_SIZE 0x400
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+/*
+ * Upload mac statistics supported by the hardware into the given buffer.
+ *
+ * The reference buffer must be at least %EFX_MAC_STATS_SIZE bytes,
+ * and page aligned.
+ *
+ * The hardware will only DMA statistics that it understands (of course).
+ * Drivers should not make any assumptions about which statistics are
+ * supported, especially when the statistics are generated by firmware.
+ *
+ * Thus, drivers should zero this buffer before use, so that not-understood
+ * statistics read back as zero.
+ */
+extern __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+extern __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+/* MON */
+
+typedef enum efx_mon_type_e {
+ EFX_MON_INVALID = 0,
+ EFX_MON_SFC90X0,
+ EFX_MON_SFC91X0,
+ EFX_MON_SFC92X0,
+ EFX_MON_NTYPES
+} efx_mon_type_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_name(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_MON_STATS
+
+#define EFX_MON_STATS_PAGE_SIZE 0x100
+#define EFX_MON_MASK_ELEMENT_SIZE 32
+
+/* START MKCONFIG GENERATED MonitorHeaderStatsBlock 5d4ee5185e419abe */
+typedef enum efx_mon_stat_e {
+ EFX_MON_STAT_2_5V,
+ EFX_MON_STAT_VCCP1,
+ EFX_MON_STAT_VCC,
+ EFX_MON_STAT_5V,
+ EFX_MON_STAT_12V,
+ EFX_MON_STAT_VCCP2,
+ EFX_MON_STAT_EXT_TEMP,
+ EFX_MON_STAT_INT_TEMP,
+ EFX_MON_STAT_AIN1,
+ EFX_MON_STAT_AIN2,
+ EFX_MON_STAT_INT_COOLING,
+ EFX_MON_STAT_EXT_COOLING,
+ EFX_MON_STAT_1V,
+ EFX_MON_STAT_1_2V,
+ EFX_MON_STAT_1_8V,
+ EFX_MON_STAT_3_3V,
+ EFX_MON_STAT_1_2VA,
+ EFX_MON_STAT_VREF,
+ EFX_MON_STAT_VAOE,
+ EFX_MON_STAT_AOE_TEMP,
+ EFX_MON_STAT_PSU_AOE_TEMP,
+ EFX_MON_STAT_PSU_TEMP,
+ EFX_MON_STAT_FAN0,
+ EFX_MON_STAT_FAN1,
+ EFX_MON_STAT_FAN2,
+ EFX_MON_STAT_FAN3,
+ EFX_MON_STAT_FAN4,
+ EFX_MON_STAT_VAOE_IN,
+ EFX_MON_STAT_IAOE,
+ EFX_MON_STAT_IAOE_IN,
+ EFX_MON_STAT_NIC_POWER,
+ EFX_MON_STAT_0_9V,
+ EFX_MON_STAT_I0_9V,
+ EFX_MON_STAT_I1_2V,
+ EFX_MON_STAT_0_9V_ADC,
+ EFX_MON_STAT_INT_TEMP2,
+ EFX_MON_STAT_VREG_TEMP,
+ EFX_MON_STAT_VREG_0_9V_TEMP,
+ EFX_MON_STAT_VREG_1_2V_TEMP,
+ EFX_MON_STAT_INT_VPTAT,
+ EFX_MON_STAT_INT_ADC_TEMP,
+ EFX_MON_STAT_EXT_VPTAT,
+ EFX_MON_STAT_EXT_ADC_TEMP,
+ EFX_MON_STAT_AMBIENT_TEMP,
+ EFX_MON_STAT_AIRFLOW,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR,
+ EFX_MON_STAT_VDD08D_VSS08D_CSR_EXTADC,
+ EFX_MON_STAT_HOTPOINT_TEMP,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT0,
+ EFX_MON_STAT_PHY_POWER_SWITCH_PORT1,
+ EFX_MON_STAT_MUM_VCC,
+ EFX_MON_STAT_0V9_A,
+ EFX_MON_STAT_I0V9_A,
+ EFX_MON_STAT_0V9_A_TEMP,
+ EFX_MON_STAT_0V9_B,
+ EFX_MON_STAT_I0V9_B,
+ EFX_MON_STAT_0V9_B_TEMP,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V2_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY,
+ EFX_MON_STAT_CCOM_AVREG_1V8_SUPPLY_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_MASTER_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP,
+ EFX_MON_STAT_CONTROLLER_SLAVE_VPTAT_EXT_ADC,
+ EFX_MON_STAT_CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC,
+ EFX_MON_STAT_SODIMM_VOUT,
+ EFX_MON_STAT_SODIMM_0_TEMP,
+ EFX_MON_STAT_SODIMM_1_TEMP,
+ EFX_MON_STAT_PHY0_VCC,
+ EFX_MON_STAT_PHY1_VCC,
+ EFX_MON_STAT_CONTROLLER_TDIODE_TEMP,
+ EFX_MON_STAT_BOARD_FRONT_TEMP,
+ EFX_MON_STAT_BOARD_BACK_TEMP,
+ EFX_MON_NSTATS
+} efx_mon_stat_t;
+
+/* END MKCONFIG GENERATED MonitorHeaderStatsBlock */
+
+typedef enum efx_mon_stat_state_e {
+ EFX_MON_STAT_STATE_OK = 0,
+ EFX_MON_STAT_STATE_WARNING = 1,
+ EFX_MON_STAT_STATE_FATAL = 2,
+ EFX_MON_STAT_STATE_BROKEN = 3,
+ EFX_MON_STAT_STATE_NO_READING = 4,
+} efx_mon_stat_state_t;
+
+typedef struct efx_mon_stat_value_s {
+ uint16_t emsv_value;
+ uint16_t emsv_state;
+} efx_mon_stat_value_t;
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+extern void
+efx_mon_fini(
+ __in efx_nic_t *enp);
+
+/* PHY */
+
+extern __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+typedef enum efx_phy_led_mode_e {
+ EFX_PHY_LED_DEFAULT = 0,
+ EFX_PHY_LED_OFF,
+ EFX_PHY_LED_ON,
+ EFX_PHY_LED_FLASH,
+ EFX_PHY_LED_NMODES
+} efx_phy_led_mode_t;
+
+extern __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode);
+
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+extern __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_LOOPBACK
+
+typedef enum efx_loopback_type_e {
+ EFX_LOOPBACK_OFF = 0,
+ EFX_LOOPBACK_DATA = 1,
+ EFX_LOOPBACK_GMAC = 2,
+ EFX_LOOPBACK_XGMII = 3,
+ EFX_LOOPBACK_XGXS = 4,
+ EFX_LOOPBACK_XAUI = 5,
+ EFX_LOOPBACK_GMII = 6,
+ EFX_LOOPBACK_SGMII = 7,
+ EFX_LOOPBACK_XGBR = 8,
+ EFX_LOOPBACK_XFI = 9,
+ EFX_LOOPBACK_XAUI_FAR = 10,
+ EFX_LOOPBACK_GMII_FAR = 11,
+ EFX_LOOPBACK_SGMII_FAR = 12,
+ EFX_LOOPBACK_XFI_FAR = 13,
+ EFX_LOOPBACK_GPHY = 14,
+ EFX_LOOPBACK_PHY_XS = 15,
+ EFX_LOOPBACK_PCS = 16,
+ EFX_LOOPBACK_PMA_PMD = 17,
+ EFX_LOOPBACK_XPORT = 18,
+ EFX_LOOPBACK_XGMII_WS = 19,
+ EFX_LOOPBACK_XAUI_WS = 20,
+ EFX_LOOPBACK_XAUI_WS_FAR = 21,
+ EFX_LOOPBACK_XAUI_WS_NEAR = 22,
+ EFX_LOOPBACK_GMII_WS = 23,
+ EFX_LOOPBACK_XFI_WS = 24,
+ EFX_LOOPBACK_XFI_WS_FAR = 25,
+ EFX_LOOPBACK_PHYXS_WS = 26,
+ EFX_LOOPBACK_PMA_INT = 27,
+ EFX_LOOPBACK_SD_NEAR = 28,
+ EFX_LOOPBACK_SD_FAR = 29,
+ EFX_LOOPBACK_PMA_INT_WS = 30,
+ EFX_LOOPBACK_SD_FEP2_WS = 31,
+ EFX_LOOPBACK_SD_FEP1_5_WS = 32,
+ EFX_LOOPBACK_SD_FEP_WS = 33,
+ EFX_LOOPBACK_SD_FES_WS = 34,
+ EFX_LOOPBACK_NTYPES
+} efx_loopback_type_t;
+
+typedef enum efx_loopback_kind_e {
+ EFX_LOOPBACK_KIND_OFF = 0,
+ EFX_LOOPBACK_KIND_ALL,
+ EFX_LOOPBACK_KIND_MAC,
+ EFX_LOOPBACK_KIND_PHY,
+ EFX_LOOPBACK_NKINDS
+} efx_loopback_kind_t;
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t type);
+
+#if EFSYS_OPT_NAMES
+
+extern __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep);
+
+extern void
+efx_port_fini(
+ __in efx_nic_t *enp);
+
+typedef enum efx_phy_cap_type_e {
+ EFX_PHY_CAP_INVALID = 0,
+ EFX_PHY_CAP_10HDX,
+ EFX_PHY_CAP_10FDX,
+ EFX_PHY_CAP_100HDX,
+ EFX_PHY_CAP_100FDX,
+ EFX_PHY_CAP_1000HDX,
+ EFX_PHY_CAP_1000FDX,
+ EFX_PHY_CAP_10000FDX,
+ EFX_PHY_CAP_PAUSE,
+ EFX_PHY_CAP_ASYM,
+ EFX_PHY_CAP_AN,
+ EFX_PHY_CAP_40000FDX,
+ EFX_PHY_CAP_NTYPES
+} efx_phy_cap_type_t;
+
+
+#define EFX_PHY_CAP_CURRENT 0x00000000
+#define EFX_PHY_CAP_DEFAULT 0x00000001
+#define EFX_PHY_CAP_PERM 0x00000002
+
+extern void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask);
+
+extern void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp);
+
+extern __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+typedef enum efx_phy_media_type_e {
+ EFX_PHY_MEDIA_INVALID = 0,
+ EFX_PHY_MEDIA_XAUI,
+ EFX_PHY_MEDIA_CX4,
+ EFX_PHY_MEDIA_KX4,
+ EFX_PHY_MEDIA_XFP,
+ EFX_PHY_MEDIA_SFP_PLUS,
+ EFX_PHY_MEDIA_BASE_T,
+ EFX_PHY_MEDIA_QSFP_PLUS,
+ EFX_PHY_MEDIA_NTYPES
+} efx_phy_media_type_t;
+
+/* Get the type of medium currently used. If the board has ports for
+ * modules, a module is present, and we recognise the media type of
+ * the module, then this will be the media type of the module.
+ * Otherwise it will be the media type of the port.
+ */
+extern void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep);
+
+extern efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#if EFSYS_OPT_PHY_STATS
+
+/* START MKCONFIG GENERATED PhyHeaderStatsBlock 30ed56ad501f8e36 */
+typedef enum efx_phy_stat_e {
+ EFX_PHY_STAT_OUI,
+ EFX_PHY_STAT_PMA_PMD_LINK_UP,
+ EFX_PHY_STAT_PMA_PMD_RX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_TX_FAULT,
+ EFX_PHY_STAT_PMA_PMD_REV_A,
+ EFX_PHY_STAT_PMA_PMD_REV_B,
+ EFX_PHY_STAT_PMA_PMD_REV_C,
+ EFX_PHY_STAT_PMA_PMD_REV_D,
+ EFX_PHY_STAT_PCS_LINK_UP,
+ EFX_PHY_STAT_PCS_RX_FAULT,
+ EFX_PHY_STAT_PCS_TX_FAULT,
+ EFX_PHY_STAT_PCS_BER,
+ EFX_PHY_STAT_PCS_BLOCK_ERRORS,
+ EFX_PHY_STAT_PHY_XS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_SYNC_A,
+ EFX_PHY_STAT_PHY_XS_SYNC_B,
+ EFX_PHY_STAT_PHY_XS_SYNC_C,
+ EFX_PHY_STAT_PHY_XS_SYNC_D,
+ EFX_PHY_STAT_AN_LINK_UP,
+ EFX_PHY_STAT_AN_MASTER,
+ EFX_PHY_STAT_AN_LOCAL_RX_OK,
+ EFX_PHY_STAT_AN_REMOTE_RX_OK,
+ EFX_PHY_STAT_CL22EXT_LINK_UP,
+ EFX_PHY_STAT_SNR_A,
+ EFX_PHY_STAT_SNR_B,
+ EFX_PHY_STAT_SNR_C,
+ EFX_PHY_STAT_SNR_D,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_A,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_B,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_C,
+ EFX_PHY_STAT_PMA_PMD_SIGNAL_D,
+ EFX_PHY_STAT_AN_COMPLETE,
+ EFX_PHY_STAT_PMA_PMD_REV_MAJOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MINOR,
+ EFX_PHY_STAT_PMA_PMD_REV_MICRO,
+ EFX_PHY_STAT_PCS_FW_VERSION_0,
+ EFX_PHY_STAT_PCS_FW_VERSION_1,
+ EFX_PHY_STAT_PCS_FW_VERSION_2,
+ EFX_PHY_STAT_PCS_FW_VERSION_3,
+ EFX_PHY_STAT_PCS_FW_BUILD_YY,
+ EFX_PHY_STAT_PCS_FW_BUILD_MM,
+ EFX_PHY_STAT_PCS_FW_BUILD_DD,
+ EFX_PHY_STAT_PCS_OP_MODE,
+ EFX_PHY_NSTATS
+} efx_phy_stat_t;
+
+/* END MKCONFIG GENERATED PhyHeaderStatsBlock */
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t stat);
+
+#endif /* EFSYS_OPT_NAMES */
+
+#define EFX_PHY_STATS_SIZE 0x100
+
+extern __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+typedef enum efx_bist_type_e {
+ EFX_BIST_TYPE_UNKNOWN,
+ EFX_BIST_TYPE_PHY_NORMAL,
+ EFX_BIST_TYPE_PHY_CABLE_SHORT,
+ EFX_BIST_TYPE_PHY_CABLE_LONG,
+ EFX_BIST_TYPE_MC_MEM, /* Test the MC DMEM and IMEM */
+ EFX_BIST_TYPE_SAT_MEM, /* Test the DMEM and IMEM of satellite cpus*/
+ EFX_BIST_TYPE_REG, /* Test the register memories */
+ EFX_BIST_TYPE_NTYPES,
+} efx_bist_type_t;
+
+typedef enum efx_bist_result_e {
+ EFX_BIST_RESULT_UNKNOWN,
+ EFX_BIST_RESULT_RUNNING,
+ EFX_BIST_RESULT_PASSED,
+ EFX_BIST_RESULT_FAILED,
+} efx_bist_result_t;
+
+typedef enum efx_phy_cable_status_e {
+ EFX_PHY_CABLE_STATUS_OK,
+ EFX_PHY_CABLE_STATUS_INVALID,
+ EFX_PHY_CABLE_STATUS_OPEN,
+ EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_INTERPAIRSHORT,
+ EFX_PHY_CABLE_STATUS_BUSY,
+} efx_phy_cable_status_t;
+
+typedef enum efx_bist_value_e {
+ EFX_BIST_PHY_CABLE_LENGTH_A,
+ EFX_BIST_PHY_CABLE_LENGTH_B,
+ EFX_BIST_PHY_CABLE_LENGTH_C,
+ EFX_BIST_PHY_CABLE_LENGTH_D,
+ EFX_BIST_PHY_CABLE_STATUS_A,
+ EFX_BIST_PHY_CABLE_STATUS_B,
+ EFX_BIST_PHY_CABLE_STATUS_C,
+ EFX_BIST_PHY_CABLE_STATUS_D,
+ EFX_BIST_FAULT_CODE,
+ /* Memory BIST specific values. These match the MC_CMD_BIST_POLL
+ * response. */
+ EFX_BIST_MEM_TEST,
+ EFX_BIST_MEM_ADDR,
+ EFX_BIST_MEM_BUS,
+ EFX_BIST_MEM_EXPECT,
+ EFX_BIST_MEM_ACTUAL,
+ EFX_BIST_MEM_ECC,
+ EFX_BIST_MEM_ECC_PARITY,
+ EFX_BIST_MEM_ECC_FATAL,
+ EFX_BIST_NVALUES,
+} efx_bist_value_t;
+
+extern __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+#define EFX_FEATURE_IPV6 0x00000001
+#define EFX_FEATURE_LFSR_HASH_INSERT 0x00000002
+#define EFX_FEATURE_LINK_EVENTS 0x00000004
+#define EFX_FEATURE_PERIODIC_MAC_STATS 0x00000008
+#define EFX_FEATURE_MCDI 0x00000020
+#define EFX_FEATURE_LOOKAHEAD_SPLIT 0x00000040
+#define EFX_FEATURE_MAC_HEADER_FILTERS 0x00000080
+#define EFX_FEATURE_TURBO 0x00000100
+#define EFX_FEATURE_MCDI_DMA 0x00000200
+#define EFX_FEATURE_TX_SRC_FILTERS 0x00000400
+#define EFX_FEATURE_PIO_BUFFERS 0x00000800
+#define EFX_FEATURE_FW_ASSISTED_TSO 0x00001000
+#define EFX_FEATURE_FW_ASSISTED_TSO_V2 0x00002000
+#define EFX_FEATURE_PACKED_STREAM 0x00004000
+
+typedef struct efx_nic_cfg_s {
+ uint32_t enc_board_type;
+ uint32_t enc_phy_type;
+#if EFSYS_OPT_NAMES
+ char enc_phy_name[21];
+#endif
+ char enc_phy_revision[21];
+ efx_mon_type_t enc_mon_type;
+#if EFSYS_OPT_MON_STATS
+ uint32_t enc_mon_stat_dma_buf_size;
+ uint32_t enc_mon_stat_mask[(EFX_MON_NSTATS + 31) / 32];
+#endif
+ unsigned int enc_features;
+ uint8_t enc_mac_addr[6];
+ uint8_t enc_port; /* PHY port number */
+ uint32_t enc_intr_vec_base;
+ uint32_t enc_intr_limit;
+ uint32_t enc_evq_limit;
+ uint32_t enc_txq_limit;
+ uint32_t enc_rxq_limit;
+ uint32_t enc_txq_max_ndescs;
+ uint32_t enc_buftbl_limit;
+ uint32_t enc_piobuf_limit;
+ uint32_t enc_piobuf_size;
+ uint32_t enc_piobuf_min_alloc_size;
+ uint32_t enc_evq_timer_quantum_ns;
+ uint32_t enc_evq_timer_max_us;
+ uint32_t enc_clk_mult;
+ uint32_t enc_rx_prefix_size;
+ uint32_t enc_rx_buf_align_start;
+ uint32_t enc_rx_buf_align_end;
+#if EFSYS_OPT_LOOPBACK
+ efx_qword_t enc_loopback_types[EFX_LINK_NMODES];
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t enc_phy_flags_mask;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ uint32_t enc_led_mask;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+#if EFSYS_OPT_PHY_STATS
+ uint64_t enc_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MCDI
+ uint8_t enc_mcdi_mdio_channel;
+#if EFSYS_OPT_PHY_STATS
+ uint32_t enc_mcdi_phy_stat_mask;
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_MON_STATS
+ uint32_t *enc_mcdi_sensor_maskp;
+ uint32_t enc_mcdi_sensor_mask_size;
+#endif /* EFSYS_OPT_MON_STATS */
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_BIST
+ uint32_t enc_bist_mask;
+#endif /* EFSYS_OPT_BIST */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ uint32_t enc_pf;
+ uint32_t enc_vf;
+ uint32_t enc_privilege_mask;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+ boolean_t enc_bug26807_workaround;
+ boolean_t enc_bug35388_workaround;
+ boolean_t enc_bug41750_workaround;
+ boolean_t enc_bug61265_workaround;
+ boolean_t enc_rx_batching_enabled;
+ /* Maximum number of descriptors completed in an rx event. */
+ uint32_t enc_rx_batch_max;
+ /* Number of rx descriptors the hardware requires for a push. */
+ uint32_t enc_rx_push_align;
+ /* Maximum amount of data in DMA descriptor */
+ uint32_t enc_tx_dma_desc_size_max;
+ /*
+ * Boundary which DMA descriptor data must not cross or 0 if no
+ * limitation.
+ */
+ uint32_t enc_tx_dma_desc_boundary;
+ /*
+ * Maximum number of bytes into the packet the TCP header can start for
+ * the hardware to apply TSO packet edits.
+ */
+ uint32_t enc_tx_tso_tcp_header_offset_limit;
+ boolean_t enc_fw_assisted_tso_enabled;
+ boolean_t enc_fw_assisted_tso_v2_enabled;
+ /* Number of TSO contexts on the NIC (FATSOv2) */
+ uint32_t enc_fw_assisted_tso_v2_n_contexts;
+ boolean_t enc_hw_tx_insert_vlan_enabled;
+ /* Number of PFs on the NIC */
+ uint32_t enc_hw_pf_count;
+ /* Datapath firmware vadapter/vport/vswitch support */
+ boolean_t enc_datapath_cap_evb;
+ boolean_t enc_rx_disable_scatter_supported;
+ boolean_t enc_allow_set_mac_with_installed_filters;
+ boolean_t enc_enhanced_set_mac_supported;
+ boolean_t enc_init_evq_v2_supported;
+ boolean_t enc_rx_packed_stream_supported;
+ boolean_t enc_rx_var_packed_stream_supported;
+ boolean_t enc_pm_and_rxdp_counters;
+ boolean_t enc_mac_stats_40g_tx_size_bins;
+ /* External port identifier */
+ uint8_t enc_external_port;
+ uint32_t enc_mcdi_max_payload_length;
+ /* VPD may be per-PF or global */
+ boolean_t enc_vpd_is_global;
+ /* Minimum unidirectional bandwidth in Mb/s to max out all ports */
+ uint32_t enc_required_pcie_bandwidth_mbps;
+ uint32_t enc_max_pcie_link_gen;
+ /* Firmware verifies integrity of NVRAM updates */
+ uint32_t enc_fw_verified_nvram_update_required;
+} efx_nic_cfg_t;
+
+#define EFX_PCI_FUNCTION_IS_PF(_encp) ((_encp)->enc_vf == 0xffff)
+#define EFX_PCI_FUNCTION_IS_VF(_encp) ((_encp)->enc_vf != 0xffff)
+
+#define EFX_PCI_FUNCTION(_encp) \
+ (EFX_PCI_FUNCTION_IS_PF(_encp) ? (_encp)->enc_pf : (_encp)->enc_vf)
+
+#define EFX_PCI_VF_PARENT(_encp) ((_encp)->enc_pf)
+
+extern const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp);
+
+typedef struct efx_nic_fw_info_s {
+ /* Basic FW version information */
+ uint16_t enfi_mc_fw_version[4];
+ /*
+ * If datapath capabilities can be detected,
+ * additional FW information is to be shown
+ */
+ boolean_t enfi_dpcpu_fw_ids_valid;
+ /* Rx and Tx datapath CPU FW IDs */
+ uint16_t enfi_rx_dpcpu_fw_id;
+ uint16_t enfi_tx_dpcpu_fw_id;
+} efx_nic_fw_info_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip);
+
+/* Driver resource limits (minimum required/maximum usable). */
+typedef struct efx_drv_limits_s {
+ uint32_t edl_min_evq_count;
+ uint32_t edl_max_evq_count;
+
+ uint32_t edl_min_rxq_count;
+ uint32_t edl_max_rxq_count;
+
+ uint32_t edl_min_txq_count;
+ uint32_t edl_max_txq_count;
+
+ /* PIO blocks (sub-allocated from piobuf) */
+ uint32_t edl_min_pio_alloc_size;
+ uint32_t edl_max_pio_alloc_count;
+} efx_drv_limits_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp);
+
+typedef enum efx_nic_region_e {
+ EFX_REGION_VI, /* Memory BAR UC mapping */
+ EFX_REGION_PIO_WRITE_VI, /* Memory BAR WC mapping */
+} efx_nic_region_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp);
+
+
+#if EFSYS_OPT_VPD
+
+typedef enum efx_vpd_tag_e {
+ EFX_VPD_ID = 0x02,
+ EFX_VPD_END = 0x0f,
+ EFX_VPD_RO = 0x10,
+ EFX_VPD_RW = 0x11,
+} efx_vpd_tag_t;
+
+typedef uint16_t efx_vpd_keyword_t;
+
+typedef struct efx_vpd_value_s {
+ efx_vpd_tag_t evv_tag;
+ efx_vpd_keyword_t evv_keyword;
+ uint8_t evv_length;
+ uint8_t evv_value[0x100];
+} efx_vpd_value_t;
+
+
+#define EFX_VPD_KEYWORD(x, y) ((x) | ((y) << 8))
+
+extern __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_vpd_fini(
+ __in efx_nic_t *enp);
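+
+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * sources): read the VPD image and look up the read-only "SN" (serial
+ * number) keyword. Error handling and buffer allocation are abbreviated.
+ *
+ *	size_t size;
+ *	efx_vpd_value_t evv;
+ *
+ *	if (efx_vpd_init(enp) != 0)
+ *		return;
+ *	if (efx_vpd_size(enp, &size) == 0) {
+ *		(allocate a 'size' byte buffer 'data')
+ *		if (efx_vpd_read(enp, data, size) == 0) {
+ *			evv.evv_tag = EFX_VPD_RO;
+ *			evv.evv_keyword = EFX_VPD_KEYWORD('S', 'N');
+ *			if (efx_vpd_get(enp, data, size, &evv) == 0)
+ *				(evv.evv_value holds evv.evv_length bytes)
+ *		}
+ *	}
+ *	efx_vpd_fini(enp);
+ */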
+
+#endif /* EFSYS_OPT_VPD */
+
+/* NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef enum efx_nvram_type_e {
+ EFX_NVRAM_INVALID = 0,
+ EFX_NVRAM_BOOTROM,
+ EFX_NVRAM_BOOTROM_CFG,
+ EFX_NVRAM_MC_FIRMWARE,
+ EFX_NVRAM_MC_GOLDEN,
+ EFX_NVRAM_PHY,
+ EFX_NVRAM_NULLPHY,
+ EFX_NVRAM_FPGA,
+ EFX_NVRAM_FCFW,
+ EFX_NVRAM_CPLD,
+ EFX_NVRAM_FPGA_BACKUP,
+ EFX_NVRAM_DYNAMIC_CFG,
+ EFX_NVRAM_LICENSE,
+ EFX_NVRAM_UEFIROM,
+ EFX_NVRAM_NTYPES,
+} efx_nvram_type_t;
+
+extern __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *pref_chunkp);
+
+extern __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
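+
+/*
+ * Illustrative usage sketch (editorial example, not part of the original
+ * sources): read a whole partition in the preferred chunk size. 'data'
+ * is assumed to be a caller-provided buffer of at least 'size' bytes.
+ *
+ *	size_t size, chunk, off, len;
+ *
+ *	if (efx_nvram_size(enp, type, &size) != 0)
+ *		return;
+ *	if (efx_nvram_rw_start(enp, type, &chunk) != 0)
+ *		return;
+ *	for (off = 0; off < size; off += len) {
+ *		len = (size - off < chunk) ? size - off : chunk;
+ *		if (efx_nvram_read_chunk(enp, type, off, data + off,
+ *		    len) != 0)
+ *			break;
+ *	}
+ *	(void) efx_nvram_rw_finish(enp, type);
+ */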
+
+extern __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size);
+
+extern __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type);
+
+extern __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+efx_nvram_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_BOOTCFG
+
+/* Report size and offset of bootcfg sector in NVRAM partition. */
+extern __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep);
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally correct format errors in the source buffer.
+ */
+extern efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors);
+
+extern efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_DIAG
+
+typedef enum efx_pattern_type_e {
+ EFX_PATTERN_BYTE_INCREMENT = 0,
+ EFX_PATTERN_ALL_THE_SAME,
+ EFX_PATTERN_BIT_ALTERNATE,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_PATTERN_BIT_SWEEP,
+ EFX_PATTERN_NTYPES
+} efx_pattern_type_t;
+
+typedef void
+(*efx_sram_pattern_fn_t)(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp);
+
+extern __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n);
+
+extern void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n);
+
+#define EFX_BUF_TBL_SIZE 0x20000
+
+#define EFX_BUF_SIZE 4096
+
+/* EV */
+
+typedef struct efx_evq_s efx_evq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderEventQueueBlock 6f3843f5fe7cc843 */
+typedef enum efx_ev_qstat_e {
+ EV_ALL,
+ EV_RX,
+ EV_RX_OK,
+ EV_RX_FRM_TRUNC,
+ EV_RX_TOBE_DISC,
+ EV_RX_PAUSE_FRM_ERR,
+ EV_RX_BUF_OWNER_ID_ERR,
+ EV_RX_IPV4_HDR_CHKSUM_ERR,
+ EV_RX_TCP_UDP_CHKSUM_ERR,
+ EV_RX_ETH_CRC_ERR,
+ EV_RX_IP_FRAG_ERR,
+ EV_RX_MCAST_PKT,
+ EV_RX_MCAST_HASH_MATCH,
+ EV_RX_TCP_IPV4,
+ EV_RX_TCP_IPV6,
+ EV_RX_UDP_IPV4,
+ EV_RX_UDP_IPV6,
+ EV_RX_OTHER_IPV4,
+ EV_RX_OTHER_IPV6,
+ EV_RX_NON_IP,
+ EV_RX_BATCH,
+ EV_TX,
+ EV_TX_WQ_FF_FULL,
+ EV_TX_PKT_ERR,
+ EV_TX_PKT_TOO_BIG,
+ EV_TX_UNEXPECTED,
+ EV_GLOBAL,
+ EV_GLOBAL_MNT,
+ EV_DRIVER,
+ EV_DRIVER_SRM_UPD_DONE,
+ EV_DRIVER_TX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_DONE,
+ EV_DRIVER_RX_DESCQ_FLS_FAILED,
+ EV_DRIVER_RX_DSC_ERROR,
+ EV_DRIVER_TX_DSC_ERROR,
+ EV_DRV_GEN,
+ EV_MCDI_RESPONSE,
+ EV_NQSTATS
+} efx_ev_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderEventQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_ev_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_EVQ_MAXNEVS 32768
+#define EFX_EVQ_MINNEVS 512
+
+#define EFX_EVQ_SIZE(_nevs) ((_nevs) * sizeof (efx_qword_t))
+#define EFX_EVQ_NBUFS(_nevs) (EFX_EVQ_SIZE(_nevs) / EFX_BUF_SIZE)
+
+#define EFX_EVQ_FLAGS_TYPE_MASK (0x3)
+#define EFX_EVQ_FLAGS_TYPE_AUTO (0x0)
+#define EFX_EVQ_FLAGS_TYPE_THROUGHPUT (0x1)
+#define EFX_EVQ_FLAGS_TYPE_LOW_LATENCY (0x2)
+
+#define EFX_EVQ_FLAGS_NOTIFY_MASK (0xC)
+#define EFX_EVQ_FLAGS_NOTIFY_INTERRUPT (0x0) /* Interrupting (default) */
+#define EFX_EVQ_FLAGS_NOTIFY_DISABLED (0x4) /* Non-interrupting */
+
+extern __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp);
+
+extern void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+typedef __checkReturn boolean_t
+(*efx_initialized_ev_t)(
+ __in_opt void *arg);
+
+#define EFX_PKT_UNICAST 0x0004
+#define EFX_PKT_START 0x0008
+
+#define EFX_PKT_VLAN_TAGGED 0x0010
+#define EFX_CKSUM_TCPUDP 0x0020
+#define EFX_CKSUM_IPV4 0x0040
+#define EFX_PKT_CONT 0x0080
+
+#define EFX_CHECK_VLAN 0x0100
+#define EFX_PKT_TCP 0x0200
+#define EFX_PKT_UDP 0x0400
+#define EFX_PKT_IPV4 0x0800
+
+#define EFX_PKT_IPV6 0x1000
+#define EFX_PKT_PREFIX_LEN 0x2000
+#define EFX_ADDR_MISMATCH 0x4000
+#define EFX_DISCARD 0x8000
+
+/*
+ * The following flags are used only for packed stream
+ * mode. The flag values are reused to fit into 16 bits, since
+ * EFX_PKT_START and EFX_PKT_CONT are never used in packed
+ * stream mode.
+ */
+#define EFX_PKT_PACKED_STREAM_NEW_BUFFER EFX_PKT_START
+#define EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE EFX_PKT_CONT
+
+
+#define EFX_EV_RX_NLABELS 32
+#define EFX_EV_TX_NLABELS 32
+
+typedef __checkReturn boolean_t
+(*efx_rx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t size,
+ __in uint16_t flags);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/*
+ * Packed stream mode is documented in SF-112241-TC.
+ * The general idea is that, instead of putting each incoming
+ * packet into a separate buffer which is specified in a RX
+ * descriptor, a large buffer is provided to the hardware and
+ * packets are put there in a continuous stream.
+ * The main advantage of such an approach is that RX queue refilling
+ * happens much less frequently.
+ */
+
+typedef __checkReturn boolean_t
+(*efx_rx_ps_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id,
+ __in uint32_t pkt_count,
+ __in uint16_t flags);
+
+#endif
+
+typedef __checkReturn boolean_t
+(*efx_tx_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t id);
+
+#define EFX_EXCEPTION_RX_RECOVERY 0x00000001
+#define EFX_EXCEPTION_RX_DSC_ERROR 0x00000002
+#define EFX_EXCEPTION_TX_DSC_ERROR 0x00000003
+#define EFX_EXCEPTION_UNKNOWN_SENSOREVT 0x00000004
+#define EFX_EXCEPTION_FWALERT_SRAM 0x00000005
+#define EFX_EXCEPTION_UNKNOWN_FWALERT 0x00000006
+#define EFX_EXCEPTION_RX_ERROR 0x00000007
+#define EFX_EXCEPTION_TX_ERROR 0x00000008
+#define EFX_EXCEPTION_EV_ERROR 0x00000009
+
+typedef __checkReturn boolean_t
+(*efx_exception_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label,
+ __in uint32_t data);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_rxq_flush_failed_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t rxq_index);
+
+typedef __checkReturn boolean_t
+(*efx_txq_flush_done_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t txq_index);
+
+typedef __checkReturn boolean_t
+(*efx_software_ev_t)(
+ __in_opt void *arg,
+ __in uint16_t magic);
+
+typedef __checkReturn boolean_t
+(*efx_sram_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t code);
+
+#define EFX_SRAM_CLEAR 0
+#define EFX_SRAM_UPDATE 1
+#define EFX_SRAM_ILLEGAL_CLEAR 2
+
+typedef __checkReturn boolean_t
+(*efx_wake_up_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_timer_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t label);
+
+typedef __checkReturn boolean_t
+(*efx_link_change_ev_t)(
+ __in_opt void *arg,
+ __in efx_link_mode_t link_mode);
+
+#if EFSYS_OPT_MON_STATS
+
+typedef __checkReturn boolean_t
+(*efx_monitor_ev_t)(
+ __in_opt void *arg,
+ __in efx_mon_stat_t id,
+ __in efx_mon_stat_value_t value);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef __checkReturn boolean_t
+(*efx_mac_stats_ev_t)(
+ __in_opt void *arg,
+ __in uint32_t generation
+ );
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+typedef struct efx_ev_callbacks_s {
+ efx_initialized_ev_t eec_initialized;
+ efx_rx_ev_t eec_rx;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ efx_rx_ps_ev_t eec_rx_ps;
+#endif
+ efx_tx_ev_t eec_tx;
+ efx_exception_ev_t eec_exception;
+ efx_rxq_flush_done_ev_t eec_rxq_flush_done;
+ efx_rxq_flush_failed_ev_t eec_rxq_flush_failed;
+ efx_txq_flush_done_ev_t eec_txq_flush_done;
+ efx_software_ev_t eec_software;
+ efx_sram_ev_t eec_sram;
+ efx_wake_up_ev_t eec_wake_up;
+ efx_timer_ev_t eec_timer;
+ efx_link_change_ev_t eec_link_change;
+#if EFSYS_OPT_MON_STATS
+ efx_monitor_ev_t eec_monitor;
+#endif /* EFSYS_OPT_MON_STATS */
+#if EFSYS_OPT_MAC_STATS
+ efx_mac_stats_ev_t eec_mac_stats;
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_ev_callbacks_t;
+
+extern __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_EV_PREFETCH
+
+extern void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+extern void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg);
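+
+/*
+ * Illustrative poll loop sketch (editorial example, not from the original
+ * sources). 'eec' is assumed to have every callback that can fire on this
+ * queue populated, and 'read_ptr' tracks the software read position.
+ *
+ *	if (efx_ev_qpending(eep, read_ptr))
+ *		efx_ev_qpoll(eep, &read_ptr, &eec, arg);
+ *	(void) efx_ev_qprime(eep, read_ptr);
+ */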
+
+extern __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int usecs,
+ __out unsigned int *ticksp);
+
+extern __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+extern __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+/* RX */
+
+extern __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp);
+
+extern void
+efx_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+
+typedef enum efx_rx_hash_alg_e {
+ EFX_RX_HASHALG_LFSR = 0,
+ EFX_RX_HASHALG_TOEPLITZ
+} efx_rx_hash_alg_t;
+
+#define EFX_RX_HASH_IPV4 (1U << 0)
+#define EFX_RX_HASH_TCPIPV4 (1U << 1)
+#define EFX_RX_HASH_IPV6 (1U << 2)
+#define EFX_RX_HASH_TCPIPV6 (1U << 3)
+
+typedef unsigned int efx_rx_hash_type_t;
+
+typedef enum efx_rx_hash_support_e {
+ EFX_RX_HASH_UNAVAILABLE = 0, /* Hardware hash not inserted */
+ EFX_RX_HASH_AVAILABLE /* Insert hash with/without RSS */
+} efx_rx_hash_support_t;
+
+#define EFX_RSS_TBL_SIZE 128 /* Rows in RX indirection table */
+#define EFX_MAXRSS 64 /* RX indirection entry range */
+#define EFX_MAXRSS_LEGACY 16 /* See bug16611 and bug17213 */
+
+typedef enum efx_rx_scale_support_e {
+ EFX_RX_SCALE_UNAVAILABLE = 0, /* Not supported */
+ EFX_RX_SCALE_EXCLUSIVE, /* Writable key/indirection table */
+ EFX_RX_SCALE_SHARED /* Read-only key/indirection table */
+} efx_rx_scale_support_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp);
+
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+extern __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
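+
+/*
+ * RSS configuration sketch (illustrative; the spread over four receive
+ * queues and the 40-byte key length are caller choices, not defaults of
+ * this API):
+ *
+ *	unsigned int tbl[EFX_RSS_TBL_SIZE];
+ *	uint8_t key[40];		(filled with a caller-chosen key)
+ *	unsigned int i;
+ *
+ *	for (i = 0; i < EFX_RSS_TBL_SIZE; i++)
+ *		tbl[i] = i % 4;
+ *
+ *	rc = efx_rx_scale_mode_set(enp, EFX_RX_HASHALG_TOEPLITZ,
+ *	    EFX_RX_HASH_IPV4 | EFX_RX_HASH_TCPIPV4, B_TRUE);
+ *	if (rc == 0)
+ *		rc = efx_rx_scale_tbl_set(enp, tbl, EFX_RSS_TBL_SIZE);
+ *	if (rc == 0)
+ *		rc = efx_rx_scale_key_set(enp, key, sizeof (key));
+ */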
+
+extern __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+extern __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *pkt_lengthp);
+
+#define EFX_RXQ_MAXNDESCS 4096
+#define EFX_RXQ_MINNDESCS 512
+
+#define EFX_RXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_RXQ_NBUFS(_ndescs) (EFX_RXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_RXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_RXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
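+
+/*
+ * For example, a 512-entry receive ring occupies
+ * EFX_RXQ_SIZE(512) = 512 * sizeof (efx_qword_t) = 4096 bytes of
+ * descriptor memory, and at most EFX_RXQ_LIMIT(512) = 496 descriptors
+ * may be outstanding at any time.
+ */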
+
+typedef enum efx_rxq_type_e {
+ EFX_RXQ_TYPE_DEFAULT,
+ EFX_RXQ_TYPE_SCATTER,
+ EFX_RXQ_TYPE_PACKED_STREAM_1M,
+ EFX_RXQ_TYPE_PACKED_STREAM_512K,
+ EFX_RXQ_TYPE_PACKED_STREAM_256K,
+ EFX_RXQ_TYPE_PACKED_STREAM_128K,
+ EFX_RXQ_TYPE_PACKED_STREAM_64K,
+ EFX_RXQ_NTYPES
+} efx_rxq_type_t;
+
+extern __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp);
+
+typedef struct efx_buffer_s {
+ efsys_dma_addr_t eb_addr;
+ size_t eb_size;
+ boolean_t eb_eop;
+} efx_buffer_t;
+
+typedef struct efx_desc_s {
+ efx_qword_t ed_eq;
+} efx_desc_t;
+
+extern void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+extern void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
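+
+/*
+ * Receive refill sketch (illustrative; "addrs" holds the DMA addresses of
+ * n caller-owned buffers of buf_size bytes each, and "added"/"pushed"
+ * track the ring state maintained by the caller):
+ *
+ *	efx_rx_qpost(erp, addrs, buf_size, n, completed, added);
+ *	added += n;
+ *	efx_rx_qpush(erp, added, &pushed);
+ */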
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+/*
+ * Fake length for RXQ descriptors in packed stream mode
+ * to make hardware happy
+ */
+#define EFX_RXQ_PACKED_STREAM_FAKE_BUF_SIZE 32
+
+extern void
+efx_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+extern __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+extern __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qenable(
+ __in efx_rxq_t *erp);
+
+extern void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+/* TX */
+
+typedef struct efx_txq_s efx_txq_t;
+
+#if EFSYS_OPT_QSTATS
+
+/* START MKCONFIG GENERATED EfxHeaderTransmitQueueBlock 12dff8778598b2db */
+typedef enum efx_tx_qstat_e {
+ TX_POST,
+ TX_POST_PIO,
+ TX_NQSTATS
+} efx_tx_qstat_t;
+
+/* END MKCONFIG GENERATED EfxHeaderTransmitQueueBlock */
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_tx_fini(
+ __in efx_nic_t *enp);
+
+#define EFX_TXQ_MINNDESCS 512
+
+#define EFX_TXQ_SIZE(_ndescs) ((_ndescs) * sizeof (efx_qword_t))
+#define EFX_TXQ_NBUFS(_ndescs) (EFX_TXQ_SIZE(_ndescs) / EFX_BUF_SIZE)
+#define EFX_TXQ_LIMIT(_ndescs) ((_ndescs) - 16)
+#define EFX_TXQ_DC_NDESCS(_dcsize) (8 << _dcsize)
+
+#define EFX_TXQ_MAX_BUFS 8 /* Maximum independent of EFX_BUG35388_WORKAROUND. */
+
+#define EFX_TXQ_CKSUM_IPV4 0x0001
+#define EFX_TXQ_CKSUM_TCPUDP 0x0002
+#define EFX_TXQ_FATSOV2 0x0004
+
+extern __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+extern void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
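+
+/*
+ * Transmit sketch (illustrative; "ebs" is a caller-built efx_buffer_t
+ * fragment list describing one packet, with eb_eop set on the final
+ * fragment):
+ *
+ *	if (efx_tx_qpost(etp, ebs, n, completed, &added) == 0) {
+ *		efx_tx_qpush(etp, added, pushed);
+ *		pushed = added;
+ *	}
+ */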
+
+extern __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qenable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp);
+
+extern void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset);
+
+extern __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+extern void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+extern void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp);
+
+/* Number of FATSOv2 option descriptors */
+#define EFX_TX_FATSOV2_OPT_NDESCS 2
+
+/* Maximum number of DMA segments per TSO packet (not superframe) */
+#define EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX 24
+
+extern void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t tcp_mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count);
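+
+/*
+ * FATSOv2 sketch (illustrative): a TSO packet is typically described by
+ * EFX_TX_FATSOV2_OPT_NDESCS option descriptors followed by one DMA
+ * descriptor per payload fragment (up to
+ * EFX_TX_FATSOV2_DMA_SEGS_PER_PKT_MAX), all posted together:
+ *
+ *	efx_tx_qdesc_tso2_create(etp, ipv4_id, tcp_seq, tcp_mss,
+ *	    &descs[0], EFX_TX_FATSOV2_OPT_NDESCS);
+ *	for (i = 0; i < nsegs; i++)
+ *		efx_tx_qdesc_dma_create(etp, seg_addr[i], seg_size[i],
+ *		    (i == nsegs - 1), &descs[EFX_TX_FATSOV2_OPT_NDESCS + i]);
+ *	rc = efx_tx_qdesc_post(etp, descs, EFX_TX_FATSOV2_OPT_NDESCS + nsegs,
+ *	    completed, &added);
+ */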
+
+extern void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+
+#if EFSYS_OPT_NAMES
+
+extern const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *etp,
+ __in unsigned int id);
+
+#endif /* EFSYS_OPT_NAMES */
+
+extern void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+
+#endif /* EFSYS_OPT_QSTATS */
+
+extern void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+
+/* FILTER */
+
+#if EFSYS_OPT_FILTER
+
+#define EFX_ETHER_TYPE_IPV4 0x0800
+#define EFX_ETHER_TYPE_IPV6 0x86DD
+
+#define EFX_IPPROTO_TCP 6
+#define EFX_IPPROTO_UDP 17
+
+/* Use RSS to spread across multiple queues */
+#define EFX_FILTER_FLAG_RX_RSS 0x01
+/* Enable RX scatter */
+#define EFX_FILTER_FLAG_RX_SCATTER 0x02
+/*
+ * Override an automatic filter (priority EFX_FILTER_PRI_AUTO).
+ * May only be set by the filter implementation for each type.
+ * A removal request will restore the automatic filter in its place.
+ */
+#define EFX_FILTER_FLAG_RX_OVER_AUTO 0x04
+/* Filter is for RX */
+#define EFX_FILTER_FLAG_RX 0x08
+/* Filter is for TX */
+#define EFX_FILTER_FLAG_TX 0x10
+
+typedef unsigned int efx_filter_flags_t;
+
+typedef enum efx_filter_match_flags_e {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001, /* Match by remote IP host
+ * address */
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002, /* Match by local IP host
+ * address */
+ EFX_FILTER_MATCH_REM_MAC = 0x0004, /* Match by remote MAC address */
+ EFX_FILTER_MATCH_REM_PORT = 0x0008, /* Match by remote TCP/UDP port */
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010, /* Match by local MAC address */
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020, /* Match by local TCP/UDP port */
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040, /* Match by Ether-type */
+ EFX_FILTER_MATCH_INNER_VID = 0x0080, /* Match by inner VLAN ID */
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100, /* Match by outer VLAN ID */
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200, /* Match by IP transport
+ * protocol */
+ /* Match otherwise-unmatched multicast and broadcast packets */
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST = 0x40000000,
+ /* Match otherwise-unmatched unicast packets */
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST = 0x80000000,
+} efx_filter_match_flags_t;
+
+typedef enum efx_filter_priority_s {
+ EFX_FILTER_PRI_HINT = 0, /* Performance hint */
+ EFX_FILTER_PRI_AUTO, /* Automatic filter based on device
+ * address list or hardware
+ * requirements. This may only be used
+ * by the filter implementation for
+ * each NIC type. */
+ EFX_FILTER_PRI_MANUAL, /* Manually configured filter */
+ EFX_FILTER_PRI_REQUIRED, /* Required for correct behaviour of the
+ * client (e.g. SR-IOV, HyperV VMQ etc.)
+ */
+} efx_filter_priority_t;
+
+/*
+ * FIXME: All these fields are assumed to be in little-endian byte order.
+ * It may be better for some to be big-endian. See bug42804.
+ */
+
+typedef struct efx_filter_spec_s {
+ uint32_t efs_match_flags;
+ uint32_t efs_priority:2;
+ uint32_t efs_flags:6;
+ uint32_t efs_dmaq_id:12;
+ uint32_t efs_rss_context;
+ uint16_t efs_outer_vid;
+ uint16_t efs_inner_vid;
+ uint8_t efs_loc_mac[EFX_MAC_ADDR_LEN];
+ uint8_t efs_rem_mac[EFX_MAC_ADDR_LEN];
+ uint16_t efs_ether_type;
+ uint8_t efs_ip_proto;
+ uint16_t efs_loc_port;
+ uint16_t efs_rem_port;
+ efx_oword_t efs_rem_host;
+ efx_oword_t efs_loc_host;
+} efx_filter_spec_t;
+
+
+/* Default values for use in filter specifications */
+#define EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT 0xffffffff
+#define EFX_FILTER_SPEC_RX_DMAQ_ID_DROP 0xfff
+#define EFX_FILTER_SPEC_VID_UNSPEC 0xffff
+
+extern __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_filter_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+extern void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp);
+
+extern void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec);
+
+extern __checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec);
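+
+/*
+ * Filter sketch (illustrative): steer IPv4/TCP traffic for a given local
+ * address and port to the receive queue "erp". The "local_host" and
+ * "local_port" values, and their byte ordering, are the caller's
+ * responsibility (see the FIXME above).
+ *
+ *	efx_filter_spec_t spec;
+ *
+ *	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, erp);
+ *	if (efx_filter_spec_set_ipv4_local(&spec, EFX_IPPROTO_TCP,
+ *	    local_host, local_port) == 0)
+ *		(void) efx_filter_insert(enp, &spec);
+ */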
+
+#endif /* EFSYS_OPT_FILTER */
+
+/* HASH */
+
+extern __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init);
+
+extern __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init);
+
+#if EFSYS_OPT_LICENSING
+
+/* LICENSING */
+
+typedef struct efx_key_stats_s {
+ uint32_t eks_valid;
+ uint32_t eks_invalid;
+ uint32_t eks_blacklisted;
+ uint32_t eks_unverifiable;
+ uint32_t eks_wrong_node;
+ uint32_t eks_licensed_apps_lo;
+ uint32_t eks_licensed_apps_hi;
+ uint32_t eks_licensed_features_lo;
+ uint32_t eks_licensed_features_hi;
+} efx_key_stats_t;
+
+extern __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp);
+
+extern void
+efx_lic_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *ksp);
+
+extern __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+extern __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp);
+
+
+extern __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+extern __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_LICENSING */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_H */
diff --git a/drivers/net/sfc/base/efx_bootcfg.c b/drivers/net/sfc/base/efx_bootcfg.c
new file mode 100644
index 00000000..d589c86a
--- /dev/null
+++ b/drivers/net/sfc/base/efx_bootcfg.c
@@ -0,0 +1,563 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_BOOTCFG
+
+/*
+ * Maximum size of BOOTCFG block across all nics as understood by SFCgPXE.
+ * NOTE: This is larger than the Medford per-PF bootcfg sector.
+ */
+#define BOOTCFG_MAX_SIZE 0x1000
+
+/* Medford per-PF bootcfg sector */
+#define BOOTCFG_PER_PF 0x800
+#define BOOTCFG_PF_COUNT 16
+
+#define DHCP_END ((uint8_t)0xff)
+#define DHCP_PAD ((uint8_t)0)
+
+
+/* Report the layout of bootcfg sectors in NVRAM partition. */
+ __checkReturn efx_rc_t
+efx_bootcfg_sector_info(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __out_opt uint32_t *sector_countp,
+ __out size_t *offsetp,
+ __out size_t *max_sizep)
+{
+ uint32_t count;
+ size_t max_size;
+ size_t offset;
+ int rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ max_size = BOOTCFG_MAX_SIZE;
+ offset = 0;
+ count = 1;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD: {
+ /* Shared partition (array indexed by PF) */
+ max_size = BOOTCFG_PER_PF;
+ count = BOOTCFG_PF_COUNT;
+ if (pf >= count) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ offset = max_size * pf;
+ break;
+ }
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(max_size, <=, BOOTCFG_MAX_SIZE);
+
+ if (sector_countp != NULL)
+ *sector_countp = count;
+ *offsetp = offset;
+ *max_sizep = max_size;
+
+ return (0);
+
+#if EFSYS_OPT_MEDFORD
+fail2:
+ EFSYS_PROBE(fail2);
+#endif
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+
+static __checkReturn uint8_t
+efx_bootcfg_csum(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ unsigned int pos;
+ uint8_t checksum = 0;
+
+ for (pos = 0; pos < size; pos++)
+ checksum += data[pos];
+ return (checksum);
+}
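+
+/*
+ * A well-formed bootcfg sector (as parsed by efx_bootcfg_verify() below)
+ * is laid out as follows:
+ *
+ *	offset 0:	checksum byte, chosen so that all bytes in the
+ *			sector sum to zero modulo 256
+ *	offset 1..:	DHCP-style options, each encoded as
+ *			<tag> <length> <length bytes of value>,
+ *			with DHCP_PAD bytes permitted between options
+ *	terminator:	a single DHCP_END tag
+ */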
+
+static __checkReturn efx_rc_t
+efx_bootcfg_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) uint8_t const *data,
+ __in size_t size,
+ __out_opt size_t *usedp)
+{
+ size_t offset = 0;
+ size_t used = 0;
+ efx_rc_t rc;
+
+ /* Start parsing tags immediately after the checksum */
+ for (offset = 1; offset < size; ) {
+ uint8_t tag;
+ uint8_t length;
+
+ /* Consume tag */
+ tag = data[offset];
+ if (tag == DHCP_END) {
+ offset++;
+ used = offset;
+ break;
+ }
+ if (tag == DHCP_PAD) {
+ offset++;
+ continue;
+ }
+
+ /* Consume length */
+ if (offset + 1 >= size) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ length = data[offset + 1];
+
+ /* Consume *length */
+ if (offset + 1 + length >= size) {
+ rc = ENOSPC;
+ goto fail2;
+ }
+
+ offset += 2 + length;
+ used = offset;
+ }
+
+ /* Checksum the entire sector, including bytes after any DHCP_END */
+ if (efx_bootcfg_csum(enp, data, size) != 0) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ if (usedp != NULL)
+ *usedp = used;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Copy bootcfg sector data to a target buffer which may differ in size.
+ * Optionally corrects format errors in source buffer.
+ */
+ efx_rc_t
+efx_bootcfg_copy_sector(
+ __in efx_nic_t *enp,
+ __inout_bcount(sector_length)
+ uint8_t *sector,
+ __in size_t sector_length,
+ __out_bcount(data_size) uint8_t *data,
+ __in size_t data_size,
+ __in boolean_t handle_format_errors)
+{
+ size_t used_bytes;
+ efx_rc_t rc;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, sector, sector_length,
+ &used_bytes);
+
+ if (!handle_format_errors) {
+ if (rc != 0)
+ goto fail1;
+
+ if ((used_bytes < 2) ||
+ (sector[used_bytes - 1] != DHCP_END)) {
+ /* Block too short, or DHCP_END missing */
+ rc = ENOENT;
+ goto fail2;
+ }
+ }
+
+ /* Synthesize empty format on verification failure */
+ if (rc != 0 || used_bytes == 0) {
+ sector[0] = 0;
+ sector[1] = DHCP_END;
+ used_bytes = 2;
+ }
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+ EFSYS_ASSERT(sector_length >= 2);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does.
+ * Reinitialise the sector if there isn't room for the character.
+ */
+ if (sector[used_bytes - 1] != DHCP_END) {
+ if (used_bytes >= sector_length) {
+ sector[0] = 0;
+ used_bytes = 1;
+ }
+ sector[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the target buffer is large enough for the
+ * entire used bootcfg area, then copy into the target buffer.
+ */
+ if (used_bytes > data_size) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+ memcpy(data, sector, used_bytes);
+
+ /* Zero out the unused portion of the target buffer */
+ if (used_bytes < data_size)
+ (void) memset(data + used_bytes, 0, data_size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, data_size);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *payload = NULL;
+ size_t used_bytes;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ /*
+ * We need to read the entire BOOTCFG sector to ensure we read all the
+ * tags, because legacy bootcfg sectors are not guaranteed to end with
+ * a DHCP_END character. If the user hasn't supplied a sufficiently
+ * large buffer then use our own buffer.
+ */
+ if (sector_length > size) {
+ EFSYS_KMEM_ALLOC(enp->en_esip, sector_length, payload);
+ if (payload == NULL) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ } else
+ payload = (uint8_t *)data;
+
+ if ((rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL)) != 0)
+ goto fail5;
+
+ if ((rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ sector_offset, (caddr_t)payload, sector_length)) != 0) {
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+ goto fail6;
+ }
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail7;
+
+ /* Verify that the area is correctly formatted and checksummed */
+ rc = efx_bootcfg_verify(enp, (caddr_t)payload, sector_length,
+ &used_bytes);
+ if (rc != 0 || used_bytes == 0) {
+ payload[0] = (uint8_t)~DHCP_END;
+ payload[1] = DHCP_END;
+ used_bytes = 2;
+ }
+
+ EFSYS_ASSERT(used_bytes >= 2); /* checksum and DHCP_END */
+ EFSYS_ASSERT(used_bytes <= sector_length);
+
+ /*
+ * Legacy bootcfg sectors don't terminate with a DHCP_END character.
+ * Modify the returned payload so it does. BOOTCFG_MAX_SIZE is by
+ * definition large enough for any valid (per-port) bootcfg sector,
+ * so reinitialise the sector if there isn't room for the character.
+ */
+ if (payload[used_bytes - 1] != DHCP_END) {
+ if (used_bytes + 1 > sector_length) {
+ payload[0] = 0;
+ used_bytes = 1;
+ }
+
+ payload[used_bytes] = DHCP_END;
+ ++used_bytes;
+ }
+
+ /*
+ * Verify that the user supplied buffer is large enough for the
+ * entire used bootcfg area, then copy into the user supplied buffer.
+ */
+ if (used_bytes > size) {
+ rc = ENOSPC;
+ goto fail8;
+ }
+ if (sector_length > size) {
+ memcpy(data, payload, used_bytes);
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+ }
+
+ /* Zero out the unused portion of the user buffer */
+ if (used_bytes < size)
+ (void) memset(data + used_bytes, 0, size - used_bytes);
+
+ /*
+ * The checksum includes trailing data after any DHCP_END character,
+ * which we've just modified (by truncation or appending DHCP_END).
+ */
+ data[0] -= efx_bootcfg_csum(enp, data, size);
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+ if (sector_length > size)
+ EFSYS_KMEM_FREE(enp->en_esip, sector_length, payload);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ efx_rc_t
+efx_bootcfg_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ uint8_t *partn_data;
+ uint8_t checksum;
+ size_t partn_length;
+ size_t sector_length;
+ size_t sector_offset;
+ size_t used_bytes;
+ efx_rc_t rc;
+ uint32_t sector_number;
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ sector_number = enp->en_nic_cfg.enc_pf;
+#else
+ sector_number = 0;
+#endif
+
+ rc = efx_nvram_size(enp, EFX_NVRAM_BOOTROM_CFG, &partn_length);
+ if (rc != 0)
+ goto fail1;
+
+ /* The bootcfg sector may be stored in a (larger) shared partition */
+ rc = efx_bootcfg_sector_info(enp, sector_number,
+ NULL, &sector_offset, &sector_length);
+ if (rc != 0)
+ goto fail2;
+
+ if (sector_length > BOOTCFG_MAX_SIZE)
+ sector_length = BOOTCFG_MAX_SIZE;
+
+ if (sector_offset + sector_length > partn_length) {
+ /* Partition is too small */
+ rc = EFBIG;
+ goto fail3;
+ }
+
+ if ((rc = efx_bootcfg_verify(enp, data, size, &used_bytes)) != 0)
+ goto fail4;
+
+ /* The caller *must* terminate their block with a DHCP_END character */
+ if ((used_bytes < 2) || ((uint8_t)data[used_bytes - 1] != DHCP_END)) {
+ /* Block too short or DHCP_END missing */
+ rc = ENOENT;
+ goto fail5;
+ }
+
+ /* Check that the hardware has support for this much data */
+ if (used_bytes > MIN(sector_length, BOOTCFG_MAX_SIZE)) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /*
+ * If the BOOTCFG sector is stored in a shared partition, then we must
+ * read the whole partition and insert the updated bootcfg sector at the
+ * correct offset.
+ */
+ EFSYS_KMEM_ALLOC(enp->en_esip, partn_length, partn_data);
+ if (partn_data == NULL) {
+ rc = ENOMEM;
+ goto fail7;
+ }
+
+ rc = efx_nvram_rw_start(enp, EFX_NVRAM_BOOTROM_CFG, NULL);
+ if (rc != 0)
+ goto fail8;
+
+ /* Read the entire partition */
+ rc = efx_nvram_read_chunk(enp, EFX_NVRAM_BOOTROM_CFG, 0,
+ (caddr_t)partn_data, partn_length);
+ if (rc != 0)
+ goto fail9;
+
+ /*
+ * Insert the BOOTCFG sector into the partition, zero out all data after
+ * the DHCP_END tag, and adjust the checksum.
+ */
+ (void) memset(partn_data + sector_offset, 0x0, sector_length);
+ (void) memcpy(partn_data + sector_offset, data, used_bytes);
+
+ checksum = efx_bootcfg_csum(enp, data, used_bytes);
+ partn_data[sector_offset] -= checksum;
+
+ if ((rc = efx_nvram_erase(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail10;
+
+ if ((rc = efx_nvram_write_chunk(enp, EFX_NVRAM_BOOTROM_CFG,
+ 0, (caddr_t)partn_data, partn_length)) != 0)
+ goto fail11;
+
+ if ((rc = efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG)) != 0)
+ goto fail12;
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+
+ return (0);
+
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+
+ (void) efx_nvram_rw_finish(enp, EFX_NVRAM_BOOTROM_CFG);
+fail8:
+ EFSYS_PROBE(fail8);
+
+ EFSYS_KMEM_FREE(enp->en_esip, partn_length, partn_data);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BOOTCFG */
diff --git a/drivers/net/sfc/base/efx_check.h b/drivers/net/sfc/base/efx_check.h
new file mode 100644
index 00000000..c8548c04
--- /dev/null
+++ b/drivers/net/sfc/base/efx_check.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_CHECK_H
+#define _SYS_EFX_CHECK_H
+
+#include "efsys.h"
+
+/*
+ * Check that the efsys.h header in client code has a valid combination of
+ * EFSYS_OPT_xxx options.
+ *
+ * NOTE: Keep checks for obsolete options here to ensure that they are removed
+ * from client code (and do not reappear in merges from other branches).
+ */
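+
+/*
+ * For example, an efsys.h that sets EFSYS_OPT_MEDFORD must also set
+ * EFSYS_OPT_FILTER and EFSYS_OPT_MCDI, and may only set EFSYS_OPT_BOOTCFG
+ * if it also sets EFSYS_OPT_NVRAM; otherwise the checks below fail the
+ * build with #error.
+ */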
+
+#ifdef EFSYS_OPT_FALCON
+# error "FALCON is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_BOOTCFG
+/* Support NVRAM based boot config */
+# if !EFSYS_OPT_NVRAM
+# error "BOOTCFG requires NVRAM"
+# endif
+#endif /* EFSYS_OPT_BOOTCFG */
+
+#if EFSYS_OPT_CHECK_REG
+/* Verify chip implements accessed registers */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "CHECK_REG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_CHECK_REG */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+/* Decode fatal errors */
+# if !EFSYS_OPT_SIENA
+# error "INTR_FATAL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_DECODE_INTR_FATAL */
+
+#if EFSYS_OPT_DIAG
+/* Support diagnostic hardware tests */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "DIAG requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_EV_PREFETCH
+/* Support optimized EVQ data access */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "EV_PREFETCH requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#ifdef EFSYS_OPT_FALCON_NIC_CFG_OVERRIDE
+# error "FALCON_NIC_CFG_OVERRIDE is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_FILTER
+/* Support hardware packet filters */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "FILTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_FILTER */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_FILTER
+# error "HUNTINGTON or MEDFORD requires FILTER"
+# endif
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_LOOPBACK
+/* Support hardware loopback modes */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "LOOPBACK requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#ifdef EFSYS_OPT_MAC_FALCON_GMAC
+# error "MAC_FALCON_GMAC is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MAC_FALCON_XMAC
+# error "MAC_FALCON_XMAC is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+/* Support MAC statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MAC_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_MCDI
+/* Support management controller messages */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MCDI */
+
+#if (EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# if !EFSYS_OPT_MCDI
+# error "SIENA or HUNTINGTON or MEDFORD requires MCDI"
+# endif
+#endif
+
+#if EFSYS_OPT_MCDI_LOGGING
+/* Support MCDI logging */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_LOGGING requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+/* Support MCDI proxy authorization */
+# if !EFSYS_OPT_MCDI
+# error "MCDI_PROXY_AUTH requires MCDI"
+# endif
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#ifdef EFSYS_OPT_MON_LM87
+# error "MON_LM87 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_MAX6647
+# error "MON_MAX6647 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_NULL
+# error "MON_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_MON_SIENA
+# error "MON_SIENA is obsolete (replaced by MON_MCDI)."
+#endif
+
+#ifdef EFSYS_OPT_MON_HUNTINGTON
+# error "MON_HUNTINGTON is obsolete (replaced by MON_MCDI)."
+#endif
+
+#if EFSYS_OPT_MON_STATS
+/* Support monitor statistics (voltage/temperature) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_STATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_STATS */
+
+#if EFSYS_OPT_MON_MCDI
+/* Support monitor via MCDI */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "MON_MCDI requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#if EFSYS_OPT_NAMES
+/* Support printable names for statistics */
+# if !(EFSYS_OPT_LOOPBACK || EFSYS_OPT_MAC_STATS || EFSYS_OPT_MCDI || \
+ EFSYS_OPT_MON_STATS || EFSYS_OPT_PHY_STATS || EFSYS_OPT_QSTATS)
+# error "NAMES requires LOOPBACK or xxxSTATS or MCDI"
+# endif
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_NVRAM
+/* Support non-volatile configuration */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "NVRAM requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_NVRAM */
+
+#ifdef EFSYS_OPT_NVRAM_FALCON_BOOTROM
+# error "NVRAM_FALCON_BOOTROM is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFT9001
+# error "NVRAM_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_NVRAM_SFX7101
+# error "NVRAM_SFX7101 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PCIE_TUNE
+# error "PCIE_TUNE is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_BIST
+# error "PHY_BIST is obsolete (replaced by BIST)."
+#endif
+
+#if EFSYS_OPT_PHY_FLAGS
+/* Support PHY flags */
+# if !EFSYS_OPT_SIENA
+# error "PHY_FLAGS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+/* Support for PHY LED control */
+# if !EFSYS_OPT_SIENA
+# error "PHY_LED_CONTROL requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+#ifdef EFSYS_OPT_PHY_NULL
+# error "PHY_NULL is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PM8358
+# error "PHY_PM8358 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_PROPS
+# error "PHY_PROPS is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2022C2
+# error "PHY_QT2022C2 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_QT2025C
+# error "PHY_QT2025C is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFT9001
+# error "PHY_SFT9001 is obsolete and is not supported."
+#endif
+
+#ifdef EFSYS_OPT_PHY_SFX7101
+# error "PHY_SFX7101 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_PHY_STATS
+/* Support PHY statistics */
+# if !EFSYS_OPT_SIENA
+# error "PHY_STATS requires SIENA"
+# endif
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#ifdef EFSYS_OPT_PHY_TXC43128
+# error "PHY_TXC43128 is obsolete and is not supported."
+#endif
+
+#if EFSYS_OPT_QSTATS
+/* Support EVQ/RXQ/TXQ statistics */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "QSTATS requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_QSTATS */
+
+#ifdef EFSYS_OPT_RX_HDR_SPLIT
+# error "RX_HDR_SPLIT is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+/* Support receive scaling (RSS) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCALE requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCATTER
+/* Support receive scatter DMA */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "RX_SCATTER requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#ifdef EFSYS_OPT_STAT_NAME
+# error "STAT_NAME is obsolete (replaced by NAMES)."
+#endif
+
+#if EFSYS_OPT_VPD
+/* Support PCI Vital Product Data (VPD) */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "VPD requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_VPD */
+
+#ifdef EFSYS_OPT_WOL
+# error "WOL is obsolete and is not supported"
+#endif /* EFSYS_OPT_WOL */
+
+#ifdef EFSYS_OPT_MCAST_FILTER_LIST
+# error "MCAST_FILTER_LIST is obsolete and is not supported"
+#endif
+
+#if EFSYS_OPT_BIST
+/* Support BIST */
+# if !(EFSYS_OPT_SIENA || EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "BIST requires SIENA or HUNTINGTON or MEDFORD"
+# endif
+#endif /* EFSYS_OPT_BIST */
+
+#if EFSYS_OPT_LICENSING
+/* Support MCDI licensing API */
+# if !EFSYS_OPT_MCDI
+# error "LICENSING requires MCDI"
+# endif
+# if !EFSYS_HAS_UINT64
+# error "LICENSING requires UINT64"
+# endif
+#endif /* EFSYS_OPT_LICENSING */
+
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+/* Support adapters with missing static config (for factory use only) */
+# if !EFSYS_OPT_MEDFORD
+# error "ALLOW_UNCONFIGURED_NIC requires MEDFORD"
+# endif
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+/* Support packed stream mode */
+# if !(EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+# error "PACKED_STREAM requires HUNTINGTON or MEDFORD"
+# endif
+#endif
+
+#endif /* _SYS_EFX_CHECK_H */
diff --git a/drivers/net/sfc/base/efx_crc32.c b/drivers/net/sfc/base/efx_crc32.c
new file mode 100644
index 00000000..27e2708a
--- /dev/null
+++ b/drivers/net/sfc/base/efx_crc32.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/*
+ * Precomputed table for computing IEEE 802.3 CRC32
+ * with polynomial 0x04c11db7 (bit-reversed 0xedb88320)
+ */
+
+static const uint32_t efx_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+/* Calculate the IEEE 802.3 CRC32 of a MAC addr */
+ __checkReturn uint32_t
+efx_crc32_calculate(
+ __in uint32_t crc_init,
+ __in_ecount(length) uint8_t const *input,
+ __in int length)
+{
+ int index;
+ uint32_t crc = crc_init;
+
+ for (index = 0; index < length; index++) {
+ uint32_t data = *(input++);
+ crc = (crc >> 8) ^ efx_crc32_table[(crc ^ data) & 0xff];
+ }
+
+ return (crc);
+}
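+
+/*
+ * Usage sketch (illustrative): hash a MAC address, for example for a
+ * hardware multicast hash filter, seeding the CRC with all-ones as is
+ * conventional for IEEE 802.3:
+ *
+ *	uint32_t crc = efx_crc32_calculate(0xffffffff, mac_addr,
+ *	    EFX_MAC_ADDR_LEN);
+ */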
diff --git a/drivers/net/sfc/base/efx_ev.c b/drivers/net/sfc/base/efx_ev.c
new file mode 100644
index 00000000..42ded5aa
--- /dev/null
+++ b/drivers/net/sfc/base/efx_ev.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_QSTATS
+#define EFX_EV_QSTAT_INCR(_eep, _stat) \
+ do { \
+ (_eep)->ee_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_EV_QSTAT_INCR(_eep, _stat)
+#endif
+
+#define EFX_EV_PRESENT(_qword) \
+ (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
+ EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)
+
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep);
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep);
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count);
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data);
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);
+
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_ev_ops_t __efx_ev_siena_ops = {
+ siena_ev_init, /* eevo_init */
+ siena_ev_fini, /* eevo_fini */
+ siena_ev_qcreate, /* eevo_qcreate */
+ siena_ev_qdestroy, /* eevo_qdestroy */
+ siena_ev_qprime, /* eevo_qprime */
+ siena_ev_qpost, /* eevo_qpost */
+ siena_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ siena_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_ev_ops_t __efx_ev_ef10_ops = {
+ ef10_ev_init, /* eevo_init */
+ ef10_ev_fini, /* eevo_fini */
+ ef10_ev_qcreate, /* eevo_qcreate */
+ ef10_ev_qdestroy, /* eevo_qdestroy */
+ ef10_ev_qprime, /* eevo_qprime */
+ ef10_ev_qpost, /* eevo_qpost */
+ ef10_ev_qmoderate, /* eevo_qmoderate */
+#if EFSYS_OPT_QSTATS
+ ef10_ev_qstats_update, /* eevo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_ev_init(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (enp->en_mod_flags & EFX_MOD_EV) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eevop = &__efx_ev_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eevop = &__efx_ev_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ if ((rc = eevop->eevo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_eevop = eevop;
+ enp->en_mod_flags |= EFX_MOD_EV;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+ return (rc);
+}
+
+ void
+efx_ev_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);
+
+ eevop->eevo_fini(enp);
+
+ enp->en_eevop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_EV;
+}
+
+
+ __checkReturn efx_rc_t
+efx_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __deref_out efx_evq_t **eepp)
+{
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_evq_t *eep;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
+
+ EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, encp->enc_evq_limit);
+
+ switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
+ case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
+ break;
+ case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
+ if (us != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ /* Allocate an EVQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
+ if (eep == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ eep->ee_magic = EFX_EVQ_MAGIC;
+ eep->ee_enp = enp;
+ eep->ee_index = index;
+ eep->ee_mask = n - 1;
+ eep->ee_flags = flags;
+ eep->ee_esmp = esmp;
+
+ /*
+ * Set outputs before the queue is created because interrupts may be
+ * raised for events immediately after the queue is created, before the
+ * function call below returns. See bug58606.
+ *
+ * The eepp pointer passed in by the client must therefore point to data
+ * shared with the client's event processing context.
+ */
+ enp->en_ev_qcount++;
+ *eepp = eep;
+
+ if ((rc = eevop->eevo_qcreate(enp, index, esmp, n, id, us, flags,
+ eep)) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+ *eepp = NULL;
+ enp->en_ev_qcount--;
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_ev_qcount != 0);
+ --enp->en_ev_qcount;
+
+ eevop->eevo_qdestroy(eep);
+
+ /* Free the EVQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qprime(eep, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn boolean_t
+efx_ev_qpending(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ size_t offset;
+ efx_qword_t qword;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);
+
+ return (EFX_EV_PRESENT(qword));
+}
+
+#if EFSYS_OPT_EV_PREFETCH
+
+ void
+efx_ev_qprefetch(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ unsigned int offset;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+}
+
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+#define EFX_EV_BATCH 8
+
+ void
+efx_ev_qpoll(
+ __in efx_evq_t *eep,
+ __inout unsigned int *countp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_qword_t ev[EFX_EV_BATCH];
+ unsigned int batch;
+ unsigned int total;
+ unsigned int count;
+ unsigned int index;
+ size_t offset;
+
+ /* Ensure event codes match for EF10 (Huntington/Medford) and Siena */
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN);
+ EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH);
+
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV);
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV ==
+ FSE_AZ_EV_CODE_DRV_GEN_EV);
+#if EFSYS_OPT_MCDI
+ EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV ==
+ FSE_AZ_EV_CODE_MCDI_EVRESPONSE);
+#endif
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+ EFSYS_ASSERT(countp != NULL);
+ EFSYS_ASSERT(eecp != NULL);
+
+ count = *countp;
+ do {
+ /* Read up until the end of the batch period */
+ batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1));
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (total = 0; total < batch; ++total) {
+ EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));
+
+ if (!EFX_EV_PRESENT(ev[total]))
+ break;
+
+ EFSYS_PROBE3(event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));
+
+ offset += sizeof (efx_qword_t);
+ }
+
+#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
+ /*
+ * Prefetch the next batch when we get within PREFETCH_PERIOD
+ * of a completed batch. If the batch is smaller, then prefetch
+ * immediately.
+ */
+ if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ /* Process the batch of events */
+ for (index = 0; index < total; ++index) {
+ boolean_t should_abort;
+ uint32_t code;
+
+#if EFSYS_OPT_EV_PREFETCH
+ /* Prefetch if we've now reached the batch period */
+ if (total == batch &&
+ index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
+ offset = (count + batch) & eep->ee_mask;
+ offset *= sizeof (efx_qword_t);
+
+ EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
+ }
+#endif /* EFSYS_OPT_EV_PREFETCH */
+
+ EFX_EV_QSTAT_INCR(eep, EV_ALL);
+
+ code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
+ switch (code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ should_abort = eep->ee_rx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ should_abort = eep->ee_tx(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ should_abort = eep->ee_driver(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ should_abort = eep->ee_drv_gen(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#if EFSYS_OPT_MCDI
+ case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
+ should_abort = eep->ee_mcdi(eep,
+ &(ev[index]), eecp, arg);
+ break;
+#endif
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (eep->ee_global) {
+ should_abort = eep->ee_global(eep,
+ &(ev[index]), eecp, arg);
+ break;
+ }
+ /* else fallthrough */
+ default:
+ EFSYS_PROBE3(bad_event,
+ unsigned int, eep->ee_index,
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
+ uint32_t,
+ EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ (void) eecp->eec_exception(arg,
+ EFX_EXCEPTION_EV_ERROR, code);
+ should_abort = B_TRUE;
+ }
+ if (should_abort) {
+ /* Ignore subsequent events */
+ total = index + 1;
+ break;
+ }
+ }
+
+ /*
+ * Now that the hardware has most likely moved on to DMAing
+ * into the next cache line, clear the processed events. Take
+ * care to clear out only the events that we have processed.
+ */
+ EFX_SET_QWORD(ev[0]);
+ offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
+ for (index = 0; index < total; ++index) {
+ EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
+ offset += sizeof (efx_qword_t);
+ }
+
+ count += total;
+
+ } while (total == batch);
+
+ *countp = count;
+}
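+
+/*
+ * Editorial note, not part of the original patch: a rough sketch of how a
+ * driver poll loop might drive efx_ev_qpoll(). The my_evq, my_callbacks and
+ * my_cb_arg names are hypothetical, and the re-arm step assumes the usual
+ * efx_ev_qprime() wrapper declared elsewhere in libefx:
+ *
+ *	unsigned int count = 0;
+ *
+ *	for (;;) {
+ *		efx_ev_qpoll(my_evq, &count, &my_callbacks, my_cb_arg);
+ *		(void) efx_ev_qprime(my_evq, count);
+ *	}
+ *
+ * efx_ev_qpoll() dispatches each pending event to the matching eecp callback
+ * and advances count past the events it consumed.
+ */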
+
+ void
+efx_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ EFSYS_ASSERT(eevop != NULL &&
+ eevop->eevo_qpost != NULL);
+
+ eevop->eevo_qpost(eep, data);
+}
+
+ __checkReturn efx_rc_t
+efx_ev_usecs_to_ticks(
+ __in efx_nic_t *enp,
+ __in unsigned int us,
+ __out unsigned int *ticksp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int ticks;
+
+ /* Convert microseconds to a timer tick count */
+ if (us == 0)
+ ticks = 0;
+ else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
+ ticks = 1; /* Never round down to zero */
+ else
+ ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;
+
+ *ticksp = ticks;
+ return (0);
+}
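+
+/*
+ * Editorial note (illustrative only): assuming a hypothetical timer quantum
+ * of enc_evq_timer_quantum_ns = 6144, a request of us = 100 converts to
+ * 100 * 1000 / 6144 = 16 ticks, while any non-zero request shorter than one
+ * quantum (here 6.144 us) is rounded up to a single tick so that moderation
+ * is never silently disabled.
+ */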
+
+ __checkReturn efx_rc_t
+efx_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
+ EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ const efx_ev_ops_t *eevop = enp->en_eevop;
+
+ EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
+
+ eevop->eevo_qstats_update(eep, stat);
+}
+
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_ev_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Program the event queue for receive and transmit queue
+ * flush events.
+ */
+ EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);
+
+ return (0);
+
+}
+
+static __checkReturn boolean_t
+siena_ev_rx_not_ok(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in uint32_t label,
+ __in uint32_t id,
+ __inout uint16_t *flagsp)
+{
+ boolean_t ignore = B_FALSE;
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
+ EFSYS_PROBE(tobe_disc);
+ /*
+ * Assume this is a unicast address mismatch, unless below
+ * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
+ * FSF_AZ_RX_EV_PAUSE_FRM_ERR is set.
+ */
+ (*flagsp) |= EFX_ADDR_MISMATCH;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
+ EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
+ EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
+ (*flagsp) |= EFX_DISCARD;
+
+#if EFSYS_OPT_RX_SCATTER
+ /*
+ * Look out for "payload queue ran dry" errors and ignore them.
+ *
+ * Sadly for the header/data split cases, the descriptor
+ * pointer in this event refers to the header queue and
+ * therefore cannot be easily detected as duplicate.
+ * So we drop these and rely on the receive processing seeing
+ * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
+ * the partially received packet.
+ */
+ if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
+ (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
+ ignore = B_TRUE;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
+ EFSYS_PROBE(crc_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
+ EFSYS_PROBE(pause_frm_err);
+ (*flagsp) &= ~EFX_ADDR_MISMATCH;
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
+ EFSYS_PROBE(owner_id_err);
+ (*flagsp) |= EFX_DISCARD;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
+ EFSYS_PROBE(ipv4_err);
+ (*flagsp) &= ~EFX_CKSUM_IPV4;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
+ EFSYS_PROBE(udp_chk_err);
+ (*flagsp) &= ~EFX_CKSUM_TCPUDP;
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);
+
+ /*
+ * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
+ * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
+ * condition.
+ */
+ (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
+ }
+
+ return (ignore);
+}
+
+static __checkReturn boolean_t
+siena_ev_rx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t size;
+ uint32_t label;
+ boolean_t ok;
+#if EFSYS_OPT_RX_SCATTER
+ boolean_t sop;
+ boolean_t jumbo_cont;
+#endif /* EFSYS_OPT_RX_SCATTER */
+ uint32_t hdr_type;
+ boolean_t is_v6;
+ uint16_t flags;
+ boolean_t ignore;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_RX);
+
+ /* Basic packet information */
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
+ size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
+ ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);
+
+#if EFSYS_OPT_RX_SCATTER
+ sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
+ jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);
+
+ is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);
+
+ /*
+ * If packet is marked as OK and packet type is TCP/IP or
+ * UDP/IP or other IP, then we can rely on the hardware checksums.
+ */
+ switch (hdr_type) {
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
+ flags |= EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
+ flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ if (is_v6) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
+ flags = EFX_PKT_IPV6;
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
+ flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
+ }
+ break;
+
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
+ flags = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ flags = 0;
+ break;
+ }
+
+#if EFSYS_OPT_RX_SCATTER
+ /* Report scatter and header/lookahead split buffer flags */
+ if (sop)
+ flags |= EFX_PKT_START;
+ if (jumbo_cont)
+ flags |= EFX_PKT_CONT;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
+ if (!ok) {
+ ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
+ if (ignore) {
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ return (B_FALSE);
+ }
+ }
+
+ /* If we're not discarding the packet then it is ok */
+ if (~flags & EFX_DISCARD)
+ EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
+
+ /* Detect multicast packets that didn't match the filter */
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
+ EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
+ } else {
+ EFSYS_PROBE(mcast_mismatch);
+ flags |= EFX_ADDR_MISMATCH;
+ }
+ } else {
+ flags |= EFX_PKT_UNICAST;
+ }
+
+ /*
+ * The packet parser in Siena can abort parsing packets under
+ * certain error conditions, setting the PKT_NOT_PARSED bit
+ * (which clears PKT_OK). If this is set, then don't trust
+ * the PKT_TYPE field.
+ */
+ if (!ok) {
+ uint32_t parse_err;
+
+ parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
+ if (parse_err != 0)
+ flags |= EFX_CHECK_VLAN;
+ }
+
+ if (~flags & EFX_CHECK_VLAN) {
+ uint32_t pkt_type;
+
+ pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
+ if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
+ flags |= EFX_PKT_VLAN_TAGGED;
+ }
+
+ EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
+ uint32_t, size, uint16_t, flags);
+
+ EFSYS_ASSERT(eecp->eec_rx != NULL);
+ should_abort = eecp->eec_rx(arg, label, id, size, flags);
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_tx(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t id;
+ uint32_t label;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
+ EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
+ label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);
+
+ EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
+
+ EFSYS_ASSERT(eecp->eec_tx != NULL);
+ should_abort = eecp->eec_tx(arg, label, id);
+
+ return (should_abort);
+ }
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);
+
+ if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
+ EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);
+
+ EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_global(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ _NOTE(ARGUNUSED(eqp, eecp, arg))
+
+ EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);
+
+ return (B_FALSE);
+}
+
+static __checkReturn boolean_t
+siena_ev_driver(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
+ should_abort = B_FALSE;
+
+ switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
+ uint32_t txq_index;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
+
+ txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
+
+ EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
+ should_abort = eecp->eec_txq_flush_done(arg, txq_index);
+
+ break;
+ }
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
+ uint32_t rxq_index;
+ uint32_t failed;
+
+ rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+
+ EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
+ EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);
+
+ if (failed) {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);
+
+ EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_failed(arg,
+ rxq_index);
+ } else {
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
+
+ EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
+
+ should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
+ }
+
+ break;
+ }
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ EFSYS_ASSERT(eecp->eec_initialized != NULL);
+ should_abort = eecp->eec_initialized(arg);
+
+ break;
+
+ case FSE_AZ_EVQ_NOT_EN_EV:
+ EFSYS_PROBE(evq_not_en);
+ break;
+
+ case FSE_AZ_SRM_UPD_DONE_EV: {
+ uint32_t code;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);
+
+ code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_sram != NULL);
+ should_abort = eecp->eec_sram(arg, code);
+
+ break;
+ }
+ case FSE_AZ_WAKE_UP_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_wake_up != NULL);
+ should_abort = eecp->eec_wake_up(arg, id);
+
+ break;
+ }
+ case FSE_AZ_TX_PKT_NON_TCP_UDP:
+ EFSYS_PROBE(tx_pkt_non_tcp_udp);
+ break;
+
+ case FSE_AZ_TIMER_EV: {
+ uint32_t id;
+
+ id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ EFSYS_ASSERT(eecp->eec_timer != NULL);
+ should_abort = eecp->eec_timer(arg, id);
+
+ break;
+ }
+ case FSE_AZ_RX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);
+
+ EFSYS_PROBE(rx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_RX_DSC_ERROR, 0);
+
+ break;
+
+ case FSE_AZ_TX_DSC_ERROR_EV:
+ EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);
+
+ EFSYS_PROBE(tx_dsc_error);
+
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_TX_DSC_ERROR, 0);
+
+ break;
+
+ default:
+ break;
+ }
+
+ return (should_abort);
+}
+
+static __checkReturn boolean_t
+siena_ev_drv_gen(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ uint32_t data;
+ boolean_t should_abort;
+
+ EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
+
+ data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
+ if (data >= ((uint32_t)1 << 16)) {
+ EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
+ uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
+ return (B_TRUE);
+ }
+
+ EFSYS_ASSERT(eecp->eec_software != NULL);
+ should_abort = eecp->eec_software(arg, (uint16_t)data);
+
+ return (should_abort);
+}
+
+#if EFSYS_OPT_MCDI
+
+static __checkReturn boolean_t
+siena_ev_mcdi(
+ __in efx_evq_t *eep,
+ __in efx_qword_t *eqp,
+ __in const efx_ev_callbacks_t *eecp,
+ __in_opt void *arg)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ unsigned int code;
+ boolean_t should_abort = B_FALSE;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ goto out;
+
+ EFSYS_ASSERT(eecp->eec_link_change != NULL);
+ EFSYS_ASSERT(eecp->eec_exception != NULL);
+#if EFSYS_OPT_MON_STATS
+ EFSYS_ASSERT(eecp->eec_monitor != NULL);
+#endif
+
+ EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
+
+ code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
+ switch (code) {
+ case MCDI_EVENT_CODE_BADSSERT:
+ efx_mcdi_ev_death(enp, EINTR);
+ break;
+
+ case MCDI_EVENT_CODE_CMDDONE:
+ efx_mcdi_ev_cpl(enp,
+ MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
+ MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
+ MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
+ break;
+
+ case MCDI_EVENT_CODE_LINKCHANGE: {
+ efx_link_mode_t link_mode;
+
+ siena_phy_link_ev(enp, eqp, &link_mode);
+ should_abort = eecp->eec_link_change(arg, link_mode);
+ break;
+ }
+ case MCDI_EVENT_CODE_SENSOREVT: {
+#if EFSYS_OPT_MON_STATS
+ efx_mon_stat_t id;
+ efx_mon_stat_value_t value;
+ efx_rc_t rc;
+
+ if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
+ should_abort = eecp->eec_monitor(arg, id, value);
+ else if (rc == ENOTSUP) {
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_SENSOREVT,
+ MCDI_EV_FIELD(eqp, DATA));
+ } else
+ EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
+#else
+ should_abort = B_FALSE;
+#endif
+ break;
+ }
+ case MCDI_EVENT_CODE_SCHEDERR:
+ /* Informational only */
+ break;
+
+ case MCDI_EVENT_CODE_REBOOT:
+ efx_mcdi_ev_death(enp, EIO);
+ break;
+
+ case MCDI_EVENT_CODE_MAC_STATS_DMA:
+#if EFSYS_OPT_MAC_STATS
+ if (eecp->eec_mac_stats != NULL) {
+ eecp->eec_mac_stats(arg,
+ MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
+ }
+#endif
+ break;
+
+ case MCDI_EVENT_CODE_FWALERT: {
+ uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
+
+ if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_FWALERT_SRAM,
+ MCDI_EV_FIELD(eqp, FWALERT_DATA));
+ else
+ should_abort = eecp->eec_exception(arg,
+ EFX_EXCEPTION_UNKNOWN_FWALERT,
+ MCDI_EV_FIELD(eqp, DATA));
+ break;
+ }
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, code);
+ break;
+ }
+
+out:
+ return (should_abort);
+}
+
+#endif /* EFSYS_OPT_MCDI */
+
+static __checkReturn efx_rc_t
+siena_ev_qprime(
+ __in efx_evq_t *eep,
+ __in unsigned int count)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ uint32_t rptr;
+ efx_dword_t dword;
+
+ rptr = count & eep->ee_mask;
+
+ EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);
+
+ EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
+ &dword, B_FALSE);
+
+ return (0);
+}
+
+static void
+siena_ev_qpost(
+ __in efx_evq_t *eep,
+ __in uint16_t data)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_qword_t ev;
+ efx_oword_t oword;
+
+ EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_EV_DATA_DW0, (uint32_t)data);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
+ EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
+ EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));
+
+ EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qmoderate(
+ __in efx_evq_t *eep,
+ __in unsigned int us)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ unsigned int locked;
+ efx_dword_t dword;
+ efx_rc_t rc;
+
+ if (us > encp->enc_evq_timer_max_us) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* If the value is zero then disable the timer */
+ if (us == 0) {
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
+ FRF_CZ_TC_TIMER_VAL, 0);
+ } else {
+ unsigned int ticks;
+
+ if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
+ goto fail2;
+
+ EFSYS_ASSERT(ticks > 0);
+ EFX_POPULATE_DWORD_2(dword,
+ FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
+ FRF_CZ_TC_TIMER_VAL, ticks - 1);
+ }
+
+ locked = (eep->ee_index == 0) ? 1 : 0;
+
+ EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
+ eep->ee_index, &dword, locked);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_ev_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint32_t us,
+ __in uint32_t flags,
+ __in efx_evq_t *eep)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size;
+ efx_oword_t oword;
+ efx_rc_t rc;
+ boolean_t notify_mode;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MAXNEVS));
+ EFX_STATIC_ASSERT(ISP2(EFX_EVQ_MINNEVS));
+
+ if (!ISP2(n) || (n < EFX_EVQ_MINNEVS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_evq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+#if EFSYS_OPT_RX_SCALE
+ if (enp->en_intr.ei_type == EFX_INTR_LINE &&
+ index >= EFX_MAXRSS_LEGACY) {
+ rc = EINVAL;
+ goto fail3;
+ }
+#endif
+ for (size = 0; (1 << size) <= (EFX_EVQ_MAXNEVS / EFX_EVQ_MINNEVS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_EVQ_MINNEVS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the handler table */
+ eep->ee_rx = siena_ev_rx;
+ eep->ee_tx = siena_ev_tx;
+ eep->ee_driver = siena_ev_driver;
+ eep->ee_global = siena_ev_global;
+ eep->ee_drv_gen = siena_ev_drv_gen;
+#if EFSYS_OPT_MCDI
+ eep->ee_mcdi = siena_ev_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
+ EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);
+
+ /* Set up the new event queue */
+ EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
+ FRF_AZ_EVQ_BUF_BASE_ID, id);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);
+
+ /* Set initial interrupt moderation */
+ siena_ev_qmoderate(eep, us);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+#if EFSYS_OPT_RX_SCALE
+fail3:
+ EFSYS_PROBE(fail3);
+#endif
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
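+
+/*
+ * Editorial note (illustrative only): the size loop above encodes the queue
+ * depth as log2(n / EFX_EVQ_MINNEVS) for the FRF_AZ_EVQ_SIZE field. Assuming,
+ * for example, EFX_EVQ_MINNEVS == 512, a queue of n = 4096 entries is encoded
+ * as size = 3, since (1 << 3) == 4096 / 512.
+ */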
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock c0f3bc5083b40532 */
+static const char * const __efx_ev_qstat_name[] = {
+ "all",
+ "rx",
+ "rx_ok",
+ "rx_frm_trunc",
+ "rx_tobe_disc",
+ "rx_pause_frm_err",
+ "rx_buf_owner_id_err",
+ "rx_ipv4_hdr_chksum_err",
+ "rx_tcp_udp_chksum_err",
+ "rx_eth_crc_err",
+ "rx_ip_frag_err",
+ "rx_mcast_pkt",
+ "rx_mcast_hash_match",
+ "rx_tcp_ipv4",
+ "rx_tcp_ipv6",
+ "rx_udp_ipv4",
+ "rx_udp_ipv6",
+ "rx_other_ipv4",
+ "rx_other_ipv6",
+ "rx_non_ip",
+ "rx_batch",
+ "tx",
+ "tx_wq_ff_full",
+ "tx_pkt_err",
+ "tx_pkt_too_big",
+ "tx_unexpected",
+ "global",
+ "global_mnt",
+ "driver",
+ "driver_srm_upd_done",
+ "driver_tx_descq_fls_done",
+ "driver_rx_descq_fls_done",
+ "driver_rx_descq_fls_failed",
+ "driver_rx_dsc_error",
+ "driver_tx_dsc_error",
+ "drv_gen",
+ "mcdi_response",
+};
+/* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */
+
+ const char *
+efx_ev_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, EV_NQSTATS);
+
+ return (__efx_ev_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_ev_qstats_update(
+ __in efx_evq_t *eep,
+ __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < EV_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
+ eep->ee_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_ev_qdestroy(
+ __in efx_evq_t *eep)
+{
+ efx_nic_t *enp = eep->ee_enp;
+ efx_oword_t oword;
+
+ /* Purge event queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL,
+ eep->ee_index, &oword, B_TRUE);
+
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE);
+}
+
+static void
+siena_ev_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_filter.c b/drivers/net/sfc/base/efx_filter.c
new file mode 100644
index 00000000..ba310260
--- /dev/null
+++ b/drivers/net/sfc/base/efx_filter.c
@@ -0,0 +1,1424 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace);
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec);
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_filter_ops_t __efx_filter_siena_ops = {
+ siena_filter_init, /* efo_init */
+ siena_filter_fini, /* efo_fini */
+ siena_filter_restore, /* efo_restore */
+ siena_filter_add, /* efo_add */
+ siena_filter_delete, /* efo_delete */
+ siena_filter_supported_filters, /* efo_supported_filters */
+ NULL, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_filter_ops_t __efx_filter_ef10_ops = {
+ ef10_filter_init, /* efo_init */
+ ef10_filter_fini, /* efo_fini */
+ ef10_filter_restore, /* efo_restore */
+ ef10_filter_add, /* efo_add */
+ ef10_filter_delete, /* efo_delete */
+ ef10_filter_supported_filters, /* efo_supported_filters */
+ ef10_filter_reconfigure, /* efo_reconfigure */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_filter_insert(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ return (efop->efo_add(enp, spec, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_remove(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ const efx_filter_ops_t *efop = enp->en_efop;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3U(spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+#if EFSYS_OPT_RX_SCALE
+ spec->efs_rss_context = enp->en_rss_context;
+#endif
+
+ return (efop->efo_delete(enp, spec));
+}
+
+ __checkReturn efx_rc_t
+efx_filter_restore(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if ((rc = enp->en_efop->efo_restore(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_filter_init(
+ __in efx_nic_t *enp)
+{
+ const efx_filter_ops_t *efop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_FILTER));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ efop = &__efx_filter_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ efop = &__efx_filter_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efop->efo_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_efop = efop;
+ enp->en_mod_flags |= EFX_MOD_FILTER;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+ return (rc);
+}
+
+ void
+efx_filter_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ enp->en_efop->efo_fini(enp);
+
+ enp->en_efop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_FILTER;
+}
+
+/*
+ * Query the possible combinations of match flags which can be filtered on.
+ * These are returned as a list, of which each 32 bit element is a bitmask
+ * formed of EFX_FILTER_MATCH flags.
+ *
+ * The combinations are ordered in priority from highest to lowest.
+ *
+ * If the provided buffer is too short to hold the list, the call will fail
+ * with ENOSPC and *list_lengthp will be set to the buffer length required.
+ */
+ __checkReturn efx_rc_t
+efx_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+ EFSYS_ASSERT(enp->en_efop->efo_supported_filters != NULL);
+
+ if (buffer == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ rc = enp->en_efop->efo_supported_filters(enp, buffer, buffer_length,
+ list_lengthp);
+ if (rc != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
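+
+/*
+ * Editorial note, not part of the original patch: a caller would typically
+ * handle ENOSPC by retrying with the length reported back, along these lines
+ * (the local names are hypothetical):
+ *
+ *	uint32_t matches[16];
+ *	size_t nmatches;
+ *	efx_rc_t rc;
+ *
+ *	rc = efx_filter_supported_filters(enp, matches,
+ *	    sizeof (matches) / sizeof (matches[0]), &nmatches);
+ *
+ * On ENOSPC, nmatches holds the required element count, so the caller can
+ * allocate a larger buffer and repeat the call.
+ */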
+
+ __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_FILTER);
+
+ if (enp->en_efop->efo_reconfigure != NULL) {
+ if ((rc = enp->en_efop->efo_reconfigure(enp, mac_addr,
+ all_unicst, mulcst,
+ all_mulcst, brdcst,
+ addrs, count)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_filter_spec_init_rx(
+ __out efx_filter_spec_t *spec,
+ __in efx_filter_priority_t priority,
+ __in efx_filter_flags_t flags,
+ __in efx_rxq_t *erp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(erp, !=, NULL);
+ EFSYS_ASSERT((flags & ~(EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_SCATTER)) == 0);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = priority;
+ spec->efs_flags = EFX_FILTER_FLAG_RX | flags;
+ spec->efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ spec->efs_dmaq_id = (uint16_t)erp->er_index;
+}
+
+ void
+efx_filter_spec_init_tx(
+ __out efx_filter_spec_t *spec,
+ __in efx_txq_t *etp)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(etp, !=, NULL);
+
+ memset(spec, 0, sizeof (*spec));
+ spec->efs_priority = EFX_FILTER_PRI_REQUIRED;
+ spec->efs_flags = EFX_FILTER_FLAG_TX;
+ spec->efs_dmaq_id = (uint16_t)etp->et_index;
+}
+
+
+/*
+ * Specify IPv4 host, transport protocol and port in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t host,
+ __in uint16_t port)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = host;
+ spec->efs_loc_port = port;
+ return (0);
+}
+
+/*
+ * Specify IPv4 hosts, transport protocol and ports in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_ipv4_full(
+ __inout efx_filter_spec_t *spec,
+ __in uint8_t proto,
+ __in uint32_t lhost,
+ __in uint16_t lport,
+ __in uint32_t rhost,
+ __in uint16_t rport)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->efs_ether_type = EFX_ETHER_TYPE_IPV4;
+ spec->efs_ip_proto = proto;
+ spec->efs_loc_host.eo_u32[0] = lhost;
+ spec->efs_loc_port = lport;
+ spec->efs_rem_host.eo_u32[0] = rhost;
+ spec->efs_rem_port = rport;
+ return (0);
+}
+
+/*
+ * Specify local Ethernet address and/or VID in filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_eth_local(
+ __inout efx_filter_spec_t *spec,
+ __in uint16_t vid,
+ __in const uint8_t *addr)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+ EFSYS_ASSERT3P(addr, !=, NULL);
+
+ if (vid == EFX_FILTER_SPEC_VID_UNSPEC && addr == NULL)
+ return (EINVAL);
+
+ if (vid != EFX_FILTER_SPEC_VID_UNSPEC) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->efs_outer_vid = vid;
+ }
+ if (addr != NULL) {
+ spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->efs_loc_mac, addr, EFX_MAC_ADDR_LEN);
+ }
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched unicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_uc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ return (0);
+}
+
+/*
+ * Specify matching otherwise-unmatched multicast in a filter specification
+ */
+__checkReturn efx_rc_t
+efx_filter_spec_set_mc_def(
+ __inout efx_filter_spec_t *spec)
+{
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ spec->efs_match_flags |= EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ return (0);
+}
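+
+/*
+ * Editorial note, not part of the original patch: a minimal sketch of how the
+ * spec helpers above combine with efx_filter_insert(). The my_rxq queue
+ * handle and the my_host/my_port values are hypothetical placeholders:
+ *
+ *	efx_filter_spec_t spec;
+ *	efx_rc_t rc;
+ *
+ *	efx_filter_spec_init_rx(&spec, EFX_FILTER_PRI_REQUIRED, 0, my_rxq);
+ *	(void) efx_filter_spec_set_ipv4_local(&spec, EFX_IPPROTO_TCP,
+ *	    my_host, my_port);
+ *	rc = efx_filter_insert(enp, &spec);
+ *
+ * The matching efx_filter_remove() call takes an identically constructed
+ * spec to delete the filter again.
+ */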
+
+
+
+#if EFSYS_OPT_SIENA
+
+/*
+ * "Fudge factors" - difference between programmed value and actual depth.
+ * Due to the pipelined implementation we need to program the hardware with
+ * a value that is larger than the hop limit we want.
+ */
+#define FILTER_CTL_SRCH_FUDGE_WILD 3
+#define FILTER_CTL_SRCH_FUDGE_FULL 1
+
+/*
+ * Hard maximum hop limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_filter_search() when the
+ * table is full.
+ */
+#define FILTER_CTL_SRCH_MAX 200
+
+static __checkReturn efx_rc_t
+siena_filter_spec_from_gen_spec(
+ __out siena_filter_spec_t *sf_spec,
+ __in efx_filter_spec_t *gen_spec)
+{
+ efx_rc_t rc;
+ boolean_t is_full = B_FALSE;
+
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX)
+ EFSYS_ASSERT3U(gen_spec->efs_flags, ==, EFX_FILTER_FLAG_TX);
+ else
+ EFSYS_ASSERT3U(gen_spec->efs_flags, &, EFX_FILTER_FLAG_RX);
+
+ /* Falconsiena only has one RSS context */
+ if ((gen_spec->efs_flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->efs_rss_context != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ sf_spec->sfs_flags = gen_spec->efs_flags;
+ sf_spec->sfs_dmaq_id = gen_spec->efs_dmaq_id;
+
+ switch (gen_spec->efs_match_flags) {
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT: {
+ uint32_t rhost, host1, host2;
+ uint16_t rport, port1, port2;
+
+ if (gen_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if (gen_spec->efs_loc_port == 0 ||
+ (is_full && gen_spec->efs_rem_port == 0)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+ switch (gen_spec->efs_ip_proto) {
+ case EFX_IPPROTO_TCP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_TCP_FULL :
+ EFX_SIENA_FILTER_TX_TCP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_TCP_FULL :
+ EFX_SIENA_FILTER_RX_TCP_WILD);
+ }
+ break;
+ case EFX_IPPROTO_UDP:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_UDP_FULL :
+ EFX_SIENA_FILTER_TX_UDP_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_UDP_FULL :
+ EFX_SIENA_FILTER_RX_UDP_WILD);
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail4;
+ }
+ /*
+ * The filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * addresses (zero for a wildcard).
+ */
+ rhost = is_full ? gen_spec->efs_rem_host.eo_u32[0] : 0;
+ rport = is_full ? gen_spec->efs_rem_port : 0;
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ host1 = gen_spec->efs_loc_host.eo_u32[0];
+ host2 = rhost;
+ } else {
+ host1 = rhost;
+ host2 = gen_spec->efs_loc_host.eo_u32[0];
+ }
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_TX_UDP_WILD) {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ } else {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ }
+ } else {
+ if (sf_spec->sfs_type ==
+ EFX_SIENA_FILTER_RX_UDP_WILD) {
+ port1 = gen_spec->efs_loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->efs_loc_port;
+ }
+ }
+ sf_spec->sfs_dword[0] = (host1 << 16) | port1;
+ sf_spec->sfs_dword[1] = (port2 << 16) | (host1 >> 16);
+ sf_spec->sfs_dword[2] = host2;
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = B_TRUE;
+ /* Fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ if (gen_spec->efs_flags & EFX_FILTER_FLAG_TX) {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_TX_MAC_FULL :
+ EFX_SIENA_FILTER_TX_MAC_WILD);
+ } else {
+ sf_spec->sfs_type = (is_full ?
+ EFX_SIENA_FILTER_RX_MAC_FULL :
+ EFX_SIENA_FILTER_RX_MAC_WILD);
+ }
+ sf_spec->sfs_dword[0] = is_full ? gen_spec->efs_outer_vid : 0;
+ sf_spec->sfs_dword[1] =
+ gen_spec->efs_loc_mac[2] << 24 |
+ gen_spec->efs_loc_mac[3] << 16 |
+ gen_spec->efs_loc_mac[4] << 8 |
+ gen_spec->efs_loc_mac[5];
+ sf_spec->sfs_dword[2] =
+ gen_spec->efs_loc_mac[0] << 8 |
+ gen_spec->efs_loc_mac[1];
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail5;
+ }
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple.
+ */
+static uint16_t
+siena_filter_tbl_hash(
+ __in uint32_t key)
+{
+ uint16_t tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ (uint16_t)(key >> 16);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ (uint16_t)(key & 0xffff);
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+
+ return (tmp);
+}
+
+/*
+ * To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash.
+ */
+static uint16_t
+siena_filter_tbl_increment(
+ __in uint32_t key)
+{
+ return ((uint16_t)(key * 2 - 1));
+}
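+
+/*
+ * Editorial note (reasoning aid, not in the original sources): the search in
+ * siena_filter_search() probes hash, hash + incr, hash + 2 * incr, ... modulo
+ * the table size. The increment (key * 2 - 1) is always odd and the table
+ * size is a power of two, so the two are coprime and the probe sequence
+ * would eventually visit every slot; in practice the walk is bounded by
+ * FILTER_CTL_SRCH_MAX.
+ */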
+
+static __checkReturn boolean_t
+siena_filter_test_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ return ((sftp->sft_bitmap[index / 32] & (1 << (index % 32))) != 0);
+}
+
+static void
+siena_filter_set_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] |= (1 << (index % 32));
+ ++sftp->sft_used;
+}
+
+static void
+siena_filter_clear_used(
+ __in siena_filter_tbl_t *sftp,
+ __in unsigned int index)
+{
+ EFSYS_ASSERT3P(sftp->sft_bitmap, !=, NULL);
+ sftp->sft_bitmap[index / 32] &= ~(1 << (index % 32));
+
+ --sftp->sft_used;
+ EFSYS_ASSERT3U(sftp->sft_used, >=, 0);
+}
+
+
+static siena_filter_tbl_id_t
+siena_filter_tbl_id(
+ __in siena_filter_type_t type)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_RX_MAC;
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_IP;
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ tbl_id = EFX_SIENA_FILTER_TBL_TX_MAC;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ tbl_id = EFX_SIENA_FILTER_NTBLS;
+ break;
+ }
+ return (tbl_id);
+}
+
+static void
+siena_filter_reset_search_depth(
+ __inout siena_filter_t *sfp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ switch (tbl_id) {
+ case EFX_SIENA_FILTER_TBL_RX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_IP:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] = 0;
+ break;
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC:
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] = 0;
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] = 0;
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+static void
+siena_filter_push_rx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TCP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_FULL_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_UDP_WILD_SRCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC].sft_size) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ sfp->sf_depth[EFX_SIENA_FILTER_RX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_filter_push_tx_limits(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_TCP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(oword,
+ FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_UDP_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ if (sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC].sft_size != 0) {
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_FULL] +
+ FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ oword, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ sfp->sf_depth[EFX_SIENA_FILTER_TX_MAC_WILD] +
+ FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static __checkReturn uint32_t
+siena_filter_build(
+ __out efx_oword_t *filter,
+ __in siena_filter_spec_t *spec)
+{
+ uint32_t dword3;
+ uint32_t key;
+ uint8_t type = spec->sfs_type;
+ uint32_t flags = spec->sfs_flags;
+
+ switch (siena_filter_tbl_id(type)) {
+ case EFX_SIENA_FILTER_TBL_RX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_RX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_RX_UDP_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_BZ_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_BZ_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_AZ_TCP_UDP, is_udp,
+ FRF_AZ_RXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_RX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_RX_MAC_WILD);
+ EFX_POPULATE_OWORD_7(*filter,
+ FRF_CZ_RMFT_RSS_EN,
+ (flags & EFX_FILTER_FLAG_RX_RSS) ? 1 : 0,
+ FRF_CZ_RMFT_SCATTER_EN,
+ (flags & EFX_FILTER_FLAG_RX_SCATTER) ? 1 : 0,
+ FRF_CZ_RMFT_RXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_RMFT_DEST_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_IP: {
+ boolean_t is_udp = (type == EFX_SIENA_FILTER_TX_UDP_FULL ||
+ type == EFX_SIENA_FILTER_TX_UDP_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TIFT_TCP_UDP, is_udp,
+ FRF_CZ_TIFT_TXQ_ID, spec->sfs_dmaq_id,
+ EFX_DWORD_2, spec->sfs_dword[2],
+ EFX_DWORD_1, spec->sfs_dword[1],
+ EFX_DWORD_0, spec->sfs_dword[0]);
+ dword3 = is_udp | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ case EFX_SIENA_FILTER_TBL_TX_MAC: {
+ boolean_t is_wild = (type == EFX_SIENA_FILTER_TX_MAC_WILD);
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->sfs_dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_DW1, spec->sfs_dword[2],
+ FRF_CZ_TMFT_SRC_MAC_DW0, spec->sfs_dword[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->sfs_dword[0]);
+ dword3 = is_wild | spec->sfs_dmaq_id << 1;
+ break;
+ }
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ return (0);
+ }
+
+ key =
+ spec->sfs_dword[0] ^
+ spec->sfs_dword[1] ^
+ spec->sfs_dword[2] ^
+ dword3;
+
+ return (key);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_push_entry(
+ __inout efx_nic_t *enp,
+ __in siena_filter_type_t type,
+ __in int index,
+ __in efx_oword_t *eop)
+{
+ efx_rc_t rc;
+
+ switch (type) {
+ case EFX_SIENA_FILTER_RX_TCP_FULL:
+ case EFX_SIENA_FILTER_RX_TCP_WILD:
+ case EFX_SIENA_FILTER_RX_UDP_FULL:
+ case EFX_SIENA_FILTER_RX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_RX_MAC_FULL:
+ case EFX_SIENA_FILTER_RX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_RX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_TCP_FULL:
+ case EFX_SIENA_FILTER_TX_TCP_WILD:
+ case EFX_SIENA_FILTER_TX_UDP_FULL:
+ case EFX_SIENA_FILTER_TX_UDP_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ case EFX_SIENA_FILTER_TX_MAC_FULL:
+ case EFX_SIENA_FILTER_TX_MAC_WILD:
+ EFX_BAR_TBL_WRITEO(enp, FR_CZ_TX_MAC_FILTER_TBL0, index,
+ eop, B_TRUE);
+ break;
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ return (0);
+
+fail1:
+ return (rc);
+}
+
+
+static __checkReturn boolean_t
+siena_filter_equal(
+ __in const siena_filter_spec_t *left,
+ __in const siena_filter_spec_t *right)
+{
+ siena_filter_tbl_id_t tbl_id;
+
+ tbl_id = siena_filter_tbl_id(left->sfs_type);
+
+
+ if (left->sfs_type != right->sfs_type)
+ return (B_FALSE);
+
+ if (memcmp(left->sfs_dword, right->sfs_dword,
+ sizeof (left->sfs_dword)))
+ return (B_FALSE);
+
+ if ((tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC) &&
+ left->sfs_dmaq_id != right->sfs_dmaq_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_search(
+ __in siena_filter_tbl_t *sftp,
+ __in siena_filter_spec_t *spec,
+ __in uint32_t key,
+ __in boolean_t for_insert,
+ __out int *filter_index,
+ __out unsigned int *depth_required)
+{
+ unsigned int hash, incr, filter_idx, depth;
+
+ hash = siena_filter_tbl_hash(key);
+ incr = siena_filter_tbl_increment(key);
+
+ filter_idx = hash & (sftp->sft_size - 1);
+ depth = 1;
+
+ for (;;) {
+ /*
+ * Return success if entry is used and matches this spec
+ * or entry is unused and we are trying to insert.
+ */
+ if (siena_filter_test_used(sftp, filter_idx) ?
+ siena_filter_equal(spec,
+ &sftp->sft_spec[filter_idx]) :
+ for_insert) {
+ *filter_index = filter_idx;
+ *depth_required = depth;
+ return (0);
+ }
+
+ /* Return failure if we reached the maximum search depth */
+ if (depth == FILTER_CTL_SRCH_MAX)
+ return (for_insert ? EBUSY : ENOENT);
+
+ filter_idx = (filter_idx + incr) & (sftp->sft_size - 1);
+ ++depth;
+ }
+}
+
+static void
+siena_filter_clear_entry(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_t *sftp,
+ __in int index)
+{
+ efx_oword_t filter;
+
+ if (siena_filter_test_used(sftp, index)) {
+ siena_filter_clear_used(sftp, index);
+
+ EFX_ZERO_OWORD(filter);
+ siena_filter_push_entry(enp,
+ sftp->sft_spec[index].sfs_type,
+ index, &filter);
+
+ memset(&sftp->sft_spec[index],
+ 0, sizeof (sftp->sft_spec[0]));
+ }
+}
+
+ void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl_id)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ int index;
+ efsys_lock_state_t state;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (index = 0; index < sftp->sft_size; ++index) {
+ siena_filter_clear_entry(enp, sftp, index);
+ }
+
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_init(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp;
+ siena_filter_tbl_t *sftp;
+ int tbl_id;
+ efx_rc_t rc;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (siena_filter_t), sfp);
+
+ if (!sfp) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_filter.ef_siena_filter = sfp;
+
+ switch (enp->en_family) {
+ case EFX_FAMILY_SIENA:
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_IP];
+ sftp->sft_size = FR_AZ_RX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_RX_MAC];
+ sftp->sft_size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_IP];
+ sftp->sft_size = FR_CZ_TX_FILTER_TBL0_ROWS;
+
+ sftp = &sfp->sf_tbl[EFX_SIENA_FILTER_TBL_TX_MAC];
+ sftp->sft_size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ break;
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ unsigned int bitmap_size;
+
+ sftp = &sfp->sf_tbl[tbl_id];
+ if (sftp->sft_size == 0)
+ continue;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, bitmap_size, sftp->sft_bitmap);
+ if (!sftp->sft_bitmap) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ sftp->sft_size * sizeof (*sftp->sft_spec),
+ sftp->sft_spec);
+ if (!sftp->sft_spec) {
+ rc = ENOMEM;
+ goto fail4;
+ }
+ memset(sftp->sft_spec, 0,
+ sftp->sft_size * sizeof (*sftp->sft_spec));
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ siena_filter_fini(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static void
+siena_filter_fini(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (sfp == NULL)
+ return;
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ siena_filter_tbl_t *sftp = &sfp->sf_tbl[tbl_id];
+ unsigned int bitmap_size;
+
+ EFX_STATIC_ASSERT(sizeof (sftp->sft_bitmap[0]) ==
+ sizeof (uint32_t));
+ bitmap_size =
+ (sftp->sft_size + (sizeof (uint32_t) * 8) - 1) / 8;
+
+ if (sftp->sft_bitmap != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, bitmap_size,
+ sftp->sft_bitmap);
+ sftp->sft_bitmap = NULL;
+ }
+
+ if (sftp->sft_spec != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip, sftp->sft_size *
+ sizeof (*sftp->sft_spec), sftp->sft_spec);
+ sftp->sft_spec = NULL;
+ }
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (siena_filter_t),
+ enp->en_filter.ef_siena_filter);
+}
+
+/* Restore filter state after a reset */
+static __checkReturn efx_rc_t
+siena_filter_restore(
+ __in efx_nic_t *enp)
+{
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *spec;
+ efx_oword_t filter;
+ int filter_idx;
+ efsys_lock_state_t state;
+ uint32_t key;
+ efx_rc_t rc;
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ for (tbl_id = 0; tbl_id < EFX_SIENA_FILTER_NTBLS; tbl_id++) {
+ sftp = &sfp->sf_tbl[tbl_id];
+ for (filter_idx = 0;
+ filter_idx < sftp->sft_size;
+ filter_idx++) {
+ if (!siena_filter_test_used(sftp, filter_idx))
+ continue;
+
+ spec = &sftp->sft_spec[filter_idx];
+ if ((key = siena_filter_build(&filter, spec)) == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((rc = siena_filter_push_entry(enp,
+ spec->sfs_type, filter_idx, &filter)) != 0)
+ goto fail2;
+ }
+ }
+
+ siena_filter_push_rx_limits(enp);
+ siena_filter_push_tx_limits(enp);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_add(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec,
+ __in boolean_t may_replace)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ siena_filter_spec_t *saved_sf_spec;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ if (sftp->sft_size == 0) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_TRUE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail3;
+
+ EFSYS_ASSERT3U(filter_idx, <, sftp->sft_size);
+ saved_sf_spec = &sftp->sft_spec[filter_idx];
+
+ if (siena_filter_test_used(sftp, filter_idx)) {
+ if (may_replace == B_FALSE) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ siena_filter_set_used(sftp, filter_idx);
+ *saved_sf_spec = sf_spec;
+
+ if (sfp->sf_depth[sf_spec.sfs_type] < depth) {
+ sfp->sf_depth[sf_spec.sfs_type] = depth;
+ if (tbl_id == EFX_SIENA_FILTER_TBL_TX_IP ||
+ tbl_id == EFX_SIENA_FILTER_TBL_TX_MAC)
+ siena_filter_push_tx_limits(enp);
+ else
+ siena_filter_push_rx_limits(enp);
+ }
+
+ siena_filter_push_entry(enp, sf_spec.sfs_type,
+ filter_idx, &filter);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+
+fail3:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_filter_delete(
+ __in efx_nic_t *enp,
+ __inout efx_filter_spec_t *spec)
+{
+ efx_rc_t rc;
+ siena_filter_spec_t sf_spec;
+ siena_filter_t *sfp = enp->en_filter.ef_siena_filter;
+ siena_filter_tbl_id_t tbl_id;
+ siena_filter_tbl_t *sftp;
+ efx_oword_t filter;
+ int filter_idx;
+ unsigned int depth;
+ efsys_lock_state_t state;
+ uint32_t key;
+
+ EFSYS_ASSERT3P(spec, !=, NULL);
+
+ if ((rc = siena_filter_spec_from_gen_spec(&sf_spec, spec)) != 0)
+ goto fail1;
+
+ tbl_id = siena_filter_tbl_id(sf_spec.sfs_type);
+ sftp = &sfp->sf_tbl[tbl_id];
+
+ key = siena_filter_build(&filter, &sf_spec);
+
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ rc = siena_filter_search(sftp, &sf_spec, key, B_FALSE,
+ &filter_idx, &depth);
+ if (rc != 0)
+ goto fail2;
+
+ siena_filter_clear_entry(enp, sftp, filter_idx);
+ if (sftp->sft_used == 0)
+ siena_filter_reset_search_depth(sfp, tbl_id);
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (0);
+
+fail2:
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+#define SIENA_MAX_SUPPORTED_MATCHES 4
+
+static __checkReturn efx_rc_t
+siena_filter_supported_filters(
+ __in efx_nic_t *enp,
+ __out_ecount(buffer_length) uint32_t *buffer,
+ __in size_t buffer_length,
+ __out size_t *list_lengthp)
+{
+ uint32_t index = 0;
+ uint32_t rx_matches[SIENA_MAX_SUPPORTED_MATCHES];
+ size_t list_length;
+ efx_rc_t rc;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+
+ if (enp->en_features & EFX_FEATURE_MAC_HEADER_FILTERS) {
+ rx_matches[index++] =
+ EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC;
+
+ rx_matches[index++] = EFX_FILTER_MATCH_LOC_MAC;
+ }
+
+ EFSYS_ASSERT3U(index, <=, SIENA_MAX_SUPPORTED_MATCHES);
+ list_length = index;
+
+ *list_lengthp = list_length;
+
+ if (buffer_length < list_length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(buffer, rx_matches, list_length * sizeof (rx_matches[0]));
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
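+/*
+ * A usage sketch (illustrative only, not a call site in this file): this
+ * routine is reached through the efo_supported_filters method and, since
+ * *list_lengthp is written before the buffer length is checked, a caller
+ * may retry with a larger buffer after ENOSPC:
+ *
+ *     uint32_t matches[SIENA_MAX_SUPPORTED_MATCHES];
+ *     size_t length;
+ *
+ *     rc = siena_filter_supported_filters(enp, matches,
+ *         sizeof (matches) / sizeof (matches[0]), &length);
+ */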
+
+#undef SIENA_MAX_SUPPORTED_MATCHES
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
diff --git a/drivers/net/sfc/base/efx_hash.c b/drivers/net/sfc/base/efx_hash.c
new file mode 100644
index 00000000..3cc0d200
--- /dev/null
+++ b/drivers/net/sfc/base/efx_hash.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright 2006 Bob Jenkins
+ *
+ * Derived from public domain source, see
+ * <http://burtleburtle.net/bob/c/lookup3.c>:
+ *
+ * "lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+ *
+ * These are functions for producing 32-bit hashes for hash table lookup...
+ * ...You can use this free for any purpose. It's in the public domain.
+ * It has no warranty."
+ *
+ * Copyright (c) 2014-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+/* Hash initial value */
+#define EFX_HASH_INITIAL_VALUE 0xdeadbeef
+
+/*
+ * Rotate a 32-bit value left
+ *
+ * Allow the platform to provide an intrinsic or optimised routine and
+ * fall back to a simple shift-based implementation otherwise.
+ */
+#if EFSYS_HAS_ROTL_DWORD
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ EFSYS_ROTL_DWORD(_value, _shift)
+
+#else
+
+#define EFX_HASH_ROTATE(_value, _shift) \
+ (((_value) << (_shift)) | ((_value) >> (32 - (_shift))))
+
+#endif
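+/*
+ * A minimal sketch of the platform hook (illustrative; _rotl stands in for
+ * whatever compiler-specific rotate intrinsic is available): an efsys.h
+ * could provide, for example,
+ *
+ *     #define EFSYS_HAS_ROTL_DWORD 1
+ *     #define EFSYS_ROTL_DWORD(_value, _shift) _rotl((_value), (_shift))
+ *
+ * otherwise the shift-based fall-back above is used.
+ */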
+
+/* Mix three 32-bit values reversibly */
+#define EFX_HASH_MIX(_a, _b, _c) \
+ do { \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 4); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 6); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 8); \
+ _b += _a; \
+ _a -= _c; \
+ _a ^= EFX_HASH_ROTATE(_c, 16); \
+ _c += _b; \
+ _b -= _a; \
+ _b ^= EFX_HASH_ROTATE(_a, 19); \
+ _a += _c; \
+ _c -= _b; \
+ _c ^= EFX_HASH_ROTATE(_b, 4); \
+ _b += _a; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Final mixing of three 32-bit values into one (_c) */
+#define EFX_HASH_FINALISE(_a, _b, _c) \
+ do { \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 14); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 11); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 25); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 16); \
+ _a ^= _c; \
+ _a -= EFX_HASH_ROTATE(_c, 4); \
+ _b ^= _a; \
+ _b -= EFX_HASH_ROTATE(_a, 14); \
+ _c ^= _b; \
+ _c -= EFX_HASH_ROTATE(_b, 24); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+/* Produce a 32-bit hash from 32-bit aligned input */
+ __checkReturn uint32_t
+efx_hash_dwords(
+ __in_ecount(count) uint32_t const *input,
+ __in size_t count,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE +
+ (((uint32_t)count) * sizeof (uint32_t)) + init;
+
+ /* Handle all but the last three dwords of the input */
+ while (count > 3) {
+ a += input[0];
+ b += input[1];
+ c += input[2];
+ EFX_HASH_MIX(a, b, c);
+
+ count -= 3;
+ input += 3;
+ }
+
+ /* Handle the left-overs */
+ switch (count) {
+ case 3:
+ c += input[2];
+ /* Fall-through */
+ case 2:
+ b += input[1];
+ /* Fall-through */
+ case 1:
+ a += input[0];
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if count parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
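+/*
+ * Illustrative usage (a sketch, not a call site in this file): hashing a
+ * four-dword key into a bucket of a power-of-two sized table, where key,
+ * bucket and table_size are caller-defined:
+ *
+ *     uint32_t key[4];
+ *     uint32_t hash;
+ *
+ *     hash = efx_hash_dwords(key, 4, 0);
+ *     bucket = hash & (table_size - 1);
+ *
+ * The input must be 32-bit aligned; efx_hash_bytes() below accepts
+ * arbitrarily aligned input.
+ */
+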
+#if EFSYS_IS_BIG_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]) << 24;
+ a += ((uint32_t)input[1]) << 16;
+ a += ((uint32_t)input[2]) << 8;
+ a += ((uint32_t)input[3]);
+ b += ((uint32_t)input[4]) << 24;
+ b += ((uint32_t)input[5]) << 16;
+ b += ((uint32_t)input[6]) << 8;
+ b += ((uint32_t)input[7]);
+ c += ((uint32_t)input[8]) << 24;
+ c += ((uint32_t)input[9]) << 16;
+ c += ((uint32_t)input[10]) << 8;
+ c += ((uint32_t)input[11]);
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]);
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 8;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 16;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]) << 24;
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]);
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 8;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 16;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]) << 24;
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]);
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 8;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 16;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]) << 24;
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+/* Produce a 32-bit hash from arbitrarily aligned input */
+ __checkReturn uint32_t
+efx_hash_bytes(
+ __in_ecount(length) uint8_t const *input,
+ __in size_t length,
+ __in uint32_t init)
+{
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+
+ /* Set up the initial internal state */
+ a = b = c = EFX_HASH_INITIAL_VALUE + (uint32_t)length + init;
+
+ /* Handle all but the last twelve bytes of the input */
+ while (length > 12) {
+ a += ((uint32_t)input[0]);
+ a += ((uint32_t)input[1]) << 8;
+ a += ((uint32_t)input[2]) << 16;
+ a += ((uint32_t)input[3]) << 24;
+ b += ((uint32_t)input[4]);
+ b += ((uint32_t)input[5]) << 8;
+ b += ((uint32_t)input[6]) << 16;
+ b += ((uint32_t)input[7]) << 24;
+ c += ((uint32_t)input[8]);
+ c += ((uint32_t)input[9]) << 8;
+ c += ((uint32_t)input[10]) << 16;
+ c += ((uint32_t)input[11]) << 24;
+ EFX_HASH_MIX(a, b, c);
+ length -= 12;
+ input += 12;
+ }
+
+ /* Handle the left-overs */
+ switch (length) {
+ case 12:
+ c += ((uint32_t)input[11]) << 24;
+ /* Fall-through */
+ case 11:
+ c += ((uint32_t)input[10]) << 16;
+ /* Fall-through */
+ case 10:
+ c += ((uint32_t)input[9]) << 8;
+ /* Fall-through */
+ case 9:
+ c += ((uint32_t)input[8]);
+ /* Fall-through */
+ case 8:
+ b += ((uint32_t)input[7]) << 24;
+ /* Fall-through */
+ case 7:
+ b += ((uint32_t)input[6]) << 16;
+ /* Fall-through */
+ case 6:
+ b += ((uint32_t)input[5]) << 8;
+ /* Fall-through */
+ case 5:
+ b += ((uint32_t)input[4]);
+ /* Fall-through */
+ case 4:
+ a += ((uint32_t)input[3]) << 24;
+ /* Fall-through */
+ case 3:
+ a += ((uint32_t)input[2]) << 16;
+ /* Fall-through */
+ case 2:
+ a += ((uint32_t)input[1]) << 8;
+ /* Fall-through */
+ case 1:
+ a += ((uint32_t)input[0]);
+ EFX_HASH_FINALISE(a, b, c);
+ break;
+
+ case 0:
+ /* Should only get here if length parameter was zero */
+ break;
+ }
+
+ return (c);
+}
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
diff --git a/drivers/net/sfc/base/efx_impl.h b/drivers/net/sfc/base/efx_impl.h
new file mode 100644
index 00000000..43add6d9
--- /dev/null
+++ b/drivers/net/sfc/base/efx_impl.h
@@ -0,0 +1,1208 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_IMPL_H
+#define _SYS_EFX_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+/* FIXME: Add definition for driver generated software events */
+#ifndef ESE_DZ_EV_CODE_DRV_GEN_EV
+#define ESE_DZ_EV_CODE_DRV_GEN_EV FSE_AZ_EV_CODE_DRV_GEN_EV
+#endif
+
+
+#if EFSYS_OPT_SIENA
+#include "siena_impl.h"
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+#include "hunt_impl.h"
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+#include "medford_impl.h"
+#endif /* EFSYS_OPT_MEDFORD */
+
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+#include "ef10_impl.h"
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFX_MOD_MCDI 0x00000001
+#define EFX_MOD_PROBE 0x00000002
+#define EFX_MOD_NVRAM 0x00000004
+#define EFX_MOD_VPD 0x00000008
+#define EFX_MOD_NIC 0x00000010
+#define EFX_MOD_INTR 0x00000020
+#define EFX_MOD_EV 0x00000040
+#define EFX_MOD_RX 0x00000080
+#define EFX_MOD_TX 0x00000100
+#define EFX_MOD_PORT 0x00000200
+#define EFX_MOD_MON 0x00000400
+#define EFX_MOD_FILTER 0x00001000
+#define EFX_MOD_LIC 0x00002000
+
+#define EFX_RESET_PHY 0x00000001
+#define EFX_RESET_RXQ_ERR 0x00000002
+#define EFX_RESET_TXQ_ERR 0x00000004
+
+typedef enum efx_mac_type_e {
+ EFX_MAC_INVALID = 0,
+ EFX_MAC_SIENA,
+ EFX_MAC_HUNTINGTON,
+ EFX_MAC_MEDFORD,
+ EFX_MAC_NTYPES
+} efx_mac_type_t;
+
+typedef struct efx_ev_ops_s {
+ efx_rc_t (*eevo_init)(efx_nic_t *);
+ void (*eevo_fini)(efx_nic_t *);
+ efx_rc_t (*eevo_qcreate)(efx_nic_t *, unsigned int,
+ efsys_mem_t *, size_t, uint32_t,
+ uint32_t, uint32_t, efx_evq_t *);
+ void (*eevo_qdestroy)(efx_evq_t *);
+ efx_rc_t (*eevo_qprime)(efx_evq_t *, unsigned int);
+ void (*eevo_qpost)(efx_evq_t *, uint16_t);
+ efx_rc_t (*eevo_qmoderate)(efx_evq_t *, unsigned int);
+#if EFSYS_OPT_QSTATS
+ void (*eevo_qstats_update)(efx_evq_t *, efsys_stat_t *);
+#endif
+} efx_ev_ops_t;
+
+typedef struct efx_tx_ops_s {
+ efx_rc_t (*etxo_init)(efx_nic_t *);
+ void (*etxo_fini)(efx_nic_t *);
+ efx_rc_t (*etxo_qcreate)(efx_nic_t *,
+ unsigned int, unsigned int,
+ efsys_mem_t *, size_t,
+ uint32_t, uint16_t,
+ efx_evq_t *, efx_txq_t *,
+ unsigned int *);
+ void (*etxo_qdestroy)(efx_txq_t *);
+ efx_rc_t (*etxo_qpost)(efx_txq_t *, efx_buffer_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qpush)(efx_txq_t *, unsigned int, unsigned int);
+ efx_rc_t (*etxo_qpace)(efx_txq_t *, unsigned int);
+ efx_rc_t (*etxo_qflush)(efx_txq_t *);
+ void (*etxo_qenable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_enable)(efx_txq_t *);
+ void (*etxo_qpio_disable)(efx_txq_t *);
+ efx_rc_t (*etxo_qpio_write)(efx_txq_t *, uint8_t *, size_t,
+ size_t);
+ efx_rc_t (*etxo_qpio_post)(efx_txq_t *, size_t, unsigned int,
+ unsigned int *);
+ efx_rc_t (*etxo_qdesc_post)(efx_txq_t *, efx_desc_t *,
+ unsigned int, unsigned int,
+ unsigned int *);
+ void (*etxo_qdesc_dma_create)(efx_txq_t *, efsys_dma_addr_t,
+ size_t, boolean_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint8_t,
+ efx_desc_t *);
+ void (*etxo_qdesc_tso2_create)(efx_txq_t *, uint16_t,
+ uint32_t, uint16_t,
+ efx_desc_t *, int);
+ void (*etxo_qdesc_vlantci_create)(efx_txq_t *, uint16_t,
+ efx_desc_t *);
+#if EFSYS_OPT_QSTATS
+ void (*etxo_qstats_update)(efx_txq_t *,
+ efsys_stat_t *);
+#endif
+} efx_tx_ops_t;
+
+typedef struct efx_rx_ops_s {
+ efx_rc_t (*erxo_init)(efx_nic_t *);
+ void (*erxo_fini)(efx_nic_t *);
+#if EFSYS_OPT_RX_SCATTER
+ efx_rc_t (*erxo_scatter_enable)(efx_nic_t *, unsigned int);
+#endif
+#if EFSYS_OPT_RX_SCALE
+ efx_rc_t (*erxo_scale_mode_set)(efx_nic_t *, efx_rx_hash_alg_t,
+ efx_rx_hash_type_t, boolean_t);
+ efx_rc_t (*erxo_scale_key_set)(efx_nic_t *, uint8_t *, size_t);
+ efx_rc_t (*erxo_scale_tbl_set)(efx_nic_t *, unsigned int *,
+ size_t);
+ uint32_t (*erxo_prefix_hash)(efx_nic_t *, efx_rx_hash_alg_t,
+ uint8_t *);
+#endif /* EFSYS_OPT_RX_SCALE */
+ efx_rc_t (*erxo_prefix_pktlen)(efx_nic_t *, uint8_t *,
+ uint16_t *);
+ void (*erxo_qpost)(efx_rxq_t *, efsys_dma_addr_t *, size_t,
+ unsigned int, unsigned int,
+ unsigned int);
+ void (*erxo_qpush)(efx_rxq_t *, unsigned int, unsigned int *);
+#if EFSYS_OPT_RX_PACKED_STREAM
+ void (*erxo_qps_update_credits)(efx_rxq_t *);
+ uint8_t * (*erxo_qps_packet_info)(efx_rxq_t *, uint8_t *,
+ uint32_t, uint32_t,
+ uint16_t *, uint32_t *, uint32_t *);
+#endif
+ efx_rc_t (*erxo_qflush)(efx_rxq_t *);
+ void (*erxo_qenable)(efx_rxq_t *);
+ efx_rc_t (*erxo_qcreate)(efx_nic_t *enp, unsigned int,
+ unsigned int, efx_rxq_type_t,
+ efsys_mem_t *, size_t, uint32_t,
+ efx_evq_t *, efx_rxq_t *);
+ void (*erxo_qdestroy)(efx_rxq_t *);
+} efx_rx_ops_t;
+
+typedef struct efx_mac_ops_s {
+ efx_rc_t (*emo_poll)(efx_nic_t *, efx_link_mode_t *);
+ efx_rc_t (*emo_up)(efx_nic_t *, boolean_t *);
+ efx_rc_t (*emo_addr_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_set)(efx_nic_t *);
+ efx_rc_t (*emo_pdu_get)(efx_nic_t *, size_t *);
+ efx_rc_t (*emo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*emo_multicast_list_set)(efx_nic_t *);
+ efx_rc_t (*emo_filter_default_rxq_set)(efx_nic_t *,
+ efx_rxq_t *, boolean_t);
+ void (*emo_filter_default_rxq_clear)(efx_nic_t *);
+#if EFSYS_OPT_LOOPBACK
+ efx_rc_t (*emo_loopback_set)(efx_nic_t *, efx_link_mode_t,
+ efx_loopback_type_t);
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ efx_rc_t (*emo_stats_get_mask)(efx_nic_t *, uint32_t *, size_t);
+ efx_rc_t (*emo_stats_clear)(efx_nic_t *);
+ efx_rc_t (*emo_stats_upload)(efx_nic_t *, efsys_mem_t *);
+ efx_rc_t (*emo_stats_periodic)(efx_nic_t *, efsys_mem_t *,
+ uint16_t, boolean_t);
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efsys_stat_t *, uint32_t *);
+#endif /* EFSYS_OPT_MAC_STATS */
+} efx_mac_ops_t;
+
+typedef struct efx_phy_ops_s {
+ efx_rc_t (*epo_power)(efx_nic_t *, boolean_t); /* optional */
+ efx_rc_t (*epo_reset)(efx_nic_t *);
+ efx_rc_t (*epo_reconfigure)(efx_nic_t *);
+ efx_rc_t (*epo_verify)(efx_nic_t *);
+ efx_rc_t (*epo_oui_get)(efx_nic_t *, uint32_t *);
+#if EFSYS_OPT_PHY_STATS
+ efx_rc_t (*epo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ uint32_t *);
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ efx_rc_t (*epo_bist_enable_offline)(efx_nic_t *);
+ efx_rc_t (*epo_bist_start)(efx_nic_t *, efx_bist_type_t);
+ efx_rc_t (*epo_bist_poll)(efx_nic_t *, efx_bist_type_t,
+ efx_bist_result_t *, uint32_t *,
+ unsigned long *, size_t);
+ void (*epo_bist_stop)(efx_nic_t *, efx_bist_type_t);
+#endif /* EFSYS_OPT_BIST */
+} efx_phy_ops_t;
+
+#if EFSYS_OPT_FILTER
+typedef struct efx_filter_ops_s {
+ efx_rc_t (*efo_init)(efx_nic_t *);
+ void (*efo_fini)(efx_nic_t *);
+ efx_rc_t (*efo_restore)(efx_nic_t *);
+ efx_rc_t (*efo_add)(efx_nic_t *, efx_filter_spec_t *,
+ boolean_t may_replace);
+ efx_rc_t (*efo_delete)(efx_nic_t *, efx_filter_spec_t *);
+ efx_rc_t (*efo_supported_filters)(efx_nic_t *, uint32_t *,
+ size_t, size_t *);
+ efx_rc_t (*efo_reconfigure)(efx_nic_t *, uint8_t const *, boolean_t,
+ boolean_t, boolean_t, boolean_t,
+ uint8_t const *, uint32_t);
+} efx_filter_ops_t;
+
+extern __checkReturn efx_rc_t
+efx_filter_reconfigure(
+ __in efx_nic_t *enp,
+ __in_ecount(6) uint8_t const *mac_addr,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in uint32_t count);
+
+#endif /* EFSYS_OPT_FILTER */
+
+
+typedef struct efx_port_s {
+ efx_mac_type_t ep_mac_type;
+ uint32_t ep_phy_type;
+ uint8_t ep_port;
+ uint32_t ep_mac_pdu;
+ uint8_t ep_mac_addr[6];
+ efx_link_mode_t ep_link_mode;
+ boolean_t ep_all_unicst;
+ boolean_t ep_mulcst;
+ boolean_t ep_all_mulcst;
+ boolean_t ep_brdcst;
+ unsigned int ep_fcntl;
+ boolean_t ep_fcntl_autoneg;
+ efx_oword_t ep_multicst_hash[2];
+ uint8_t ep_mulcst_addr_list[EFX_MAC_ADDR_LEN *
+ EFX_MAC_MULTICAST_LIST_MAX];
+ uint32_t ep_mulcst_addr_count;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t ep_loopback_type;
+ efx_link_mode_t ep_loopback_link_mode;
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_PHY_FLAGS
+ uint32_t ep_phy_flags;
+#endif /* EFSYS_OPT_PHY_FLAGS */
+#if EFSYS_OPT_PHY_LED_CONTROL
+ efx_phy_led_mode_t ep_phy_led_mode;
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+ efx_phy_media_type_t ep_fixed_port_type;
+ efx_phy_media_type_t ep_module_type;
+ uint32_t ep_adv_cap_mask;
+ uint32_t ep_lp_cap_mask;
+ uint32_t ep_default_adv_cap_mask;
+ uint32_t ep_phy_cap_mask;
+ boolean_t ep_mac_drain;
+ boolean_t ep_mac_stats_pending;
+#if EFSYS_OPT_BIST
+ efx_bist_type_t ep_current_bist;
+#endif
+ const efx_mac_ops_t *ep_emop;
+ const efx_phy_ops_t *ep_epop;
+} efx_port_t;
+
+typedef struct efx_mon_ops_s {
+#if EFSYS_OPT_MON_STATS
+ efx_rc_t (*emo_stats_update)(efx_nic_t *, efsys_mem_t *,
+ efx_mon_stat_value_t *);
+#endif /* EFSYS_OPT_MON_STATS */
+} efx_mon_ops_t;
+
+typedef struct efx_mon_s {
+ efx_mon_type_t em_type;
+ const efx_mon_ops_t *em_emop;
+} efx_mon_t;
+
+typedef struct efx_intr_ops_s {
+ efx_rc_t (*eio_init)(efx_nic_t *, efx_intr_type_t, efsys_mem_t *);
+ void (*eio_enable)(efx_nic_t *);
+ void (*eio_disable)(efx_nic_t *);
+ void (*eio_disable_unlocked)(efx_nic_t *);
+ efx_rc_t (*eio_trigger)(efx_nic_t *, unsigned int);
+ void (*eio_status_line)(efx_nic_t *, boolean_t *, uint32_t *);
+ void (*eio_status_message)(efx_nic_t *, unsigned int,
+ boolean_t *);
+ void (*eio_fatal)(efx_nic_t *);
+ void (*eio_fini)(efx_nic_t *);
+} efx_intr_ops_t;
+
+typedef struct efx_intr_s {
+ const efx_intr_ops_t *ei_eiop;
+ efsys_mem_t *ei_esmp;
+ efx_intr_type_t ei_type;
+ unsigned int ei_level;
+} efx_intr_t;
+
+typedef struct efx_nic_ops_s {
+ efx_rc_t (*eno_probe)(efx_nic_t *);
+ efx_rc_t (*eno_board_cfg)(efx_nic_t *);
+ efx_rc_t (*eno_set_drv_limits)(efx_nic_t *, efx_drv_limits_t*);
+ efx_rc_t (*eno_reset)(efx_nic_t *);
+ efx_rc_t (*eno_init)(efx_nic_t *);
+ efx_rc_t (*eno_get_vi_pool)(efx_nic_t *, uint32_t *);
+ efx_rc_t (*eno_get_bar_region)(efx_nic_t *, efx_nic_region_t,
+ uint32_t *, size_t *);
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*eno_register_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ void (*eno_fini)(efx_nic_t *);
+ void (*eno_unprobe)(efx_nic_t *);
+} efx_nic_ops_t;
+
+#ifndef EFX_TXQ_LIMIT_TARGET
+#define EFX_TXQ_LIMIT_TARGET 259
+#endif
+#ifndef EFX_RXQ_LIMIT_TARGET
+#define EFX_RXQ_LIMIT_TARGET 512
+#endif
+#ifndef EFX_TXQ_DC_SIZE
+#define EFX_TXQ_DC_SIZE 1 /* 16 descriptors */
+#endif
+#ifndef EFX_RXQ_DC_SIZE
+#define EFX_RXQ_DC_SIZE 3 /* 64 descriptors */
+#endif
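+/*
+ * The descriptor cache size values above are encodings where a value of n
+ * selects (8 << n) descriptors, consistent with the comments: 1 selects 16
+ * TX descriptors and 3 selects 64 RX descriptors.
+ */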
+
+#if EFSYS_OPT_FILTER
+
+#if EFSYS_OPT_SIENA
+
+typedef struct siena_filter_spec_s {
+ uint8_t sfs_type;
+ uint32_t sfs_flags;
+ uint32_t sfs_dmaq_id;
+ uint32_t sfs_dword[3];
+} siena_filter_spec_t;
+
+typedef enum siena_filter_type_e {
+ EFX_SIENA_FILTER_RX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_RX_TCP_WILD, /* TCP/IPv4 {dIP,dTCP, -, -} */
+ EFX_SIENA_FILTER_RX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_RX_UDP_WILD, /* UDP/IPv4 {dIP,dUDP, -, -} */
+ EFX_SIENA_FILTER_RX_MAC_FULL, /* Ethernet {dMAC,VLAN} */
+ EFX_SIENA_FILTER_RX_MAC_WILD, /* Ethernet {dMAC, -} */
+
+ EFX_SIENA_FILTER_TX_TCP_FULL, /* TCP/IPv4 {dIP,dTCP,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_TCP_WILD, /* TCP/IPv4 { -, -,sIP,sTCP} */
+ EFX_SIENA_FILTER_TX_UDP_FULL, /* UDP/IPv4 {dIP,dUDP,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_UDP_WILD, /* UDP/IPv4 { -, -,sIP,sUDP} */
+ EFX_SIENA_FILTER_TX_MAC_FULL, /* Ethernet {sMAC,VLAN} */
+ EFX_SIENA_FILTER_TX_MAC_WILD, /* Ethernet {sMAC, -} */
+
+ EFX_SIENA_FILTER_NTYPES
+} siena_filter_type_t;
+
+typedef enum siena_filter_tbl_id_e {
+ EFX_SIENA_FILTER_TBL_RX_IP = 0,
+ EFX_SIENA_FILTER_TBL_RX_MAC,
+ EFX_SIENA_FILTER_TBL_TX_IP,
+ EFX_SIENA_FILTER_TBL_TX_MAC,
+ EFX_SIENA_FILTER_NTBLS
+} siena_filter_tbl_id_t;
+
+typedef struct siena_filter_tbl_s {
+ int sft_size; /* number of entries */
+ int sft_used; /* active count */
+ uint32_t *sft_bitmap; /* active bitmap */
+ siena_filter_spec_t *sft_spec; /* array of saved specs */
+} siena_filter_tbl_t;
+
+typedef struct siena_filter_s {
+ siena_filter_tbl_t sf_tbl[EFX_SIENA_FILTER_NTBLS];
+ unsigned int sf_depth[EFX_SIENA_FILTER_NTYPES];
+} siena_filter_t;
+
+#endif /* EFSYS_OPT_SIENA */
+
+typedef struct efx_filter_s {
+#if EFSYS_OPT_SIENA
+ siena_filter_t *ef_siena_filter;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ ef10_filter_table_t *ef_ef10_filter_table;
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+} efx_filter_t;
+
+#if EFSYS_OPT_SIENA
+
+extern void
+siena_filter_tbl_clear(
+ __in efx_nic_t *enp,
+ __in siena_filter_tbl_id_t tbl);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_FILTER */
+
+#if EFSYS_OPT_MCDI
+
+typedef struct efx_mcdi_ops_s {
+ efx_rc_t (*emco_init)(efx_nic_t *, const efx_mcdi_transport_t *);
+ void (*emco_send_request)(efx_nic_t *, void *, size_t,
+ void *, size_t);
+ efx_rc_t (*emco_poll_reboot)(efx_nic_t *);
+ boolean_t (*emco_poll_response)(efx_nic_t *);
+ void (*emco_read_response)(efx_nic_t *, void *, size_t, size_t);
+ void (*emco_fini)(efx_nic_t *);
+ efx_rc_t (*emco_feature_supported)(efx_nic_t *,
+ efx_mcdi_feature_id_t, boolean_t *);
+ void (*emco_get_timeout)(efx_nic_t *, efx_mcdi_req_t *,
+ uint32_t *);
+} efx_mcdi_ops_t;
+
+typedef struct efx_mcdi_s {
+ const efx_mcdi_ops_t *em_emcop;
+ const efx_mcdi_transport_t *em_emtp;
+ efx_mcdi_iface_t em_emip;
+} efx_mcdi_t;
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM
+typedef struct efx_nvram_ops_s {
+#if EFSYS_OPT_DIAG
+ efx_rc_t (*envo_test)(efx_nic_t *);
+#endif /* EFSYS_OPT_DIAG */
+ efx_rc_t (*envo_type_to_partn)(efx_nic_t *, efx_nvram_type_t,
+ uint32_t *);
+ efx_rc_t (*envo_partn_size)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_rw_start)(efx_nic_t *, uint32_t, size_t *);
+ efx_rc_t (*envo_partn_read)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_erase)(efx_nic_t *, uint32_t,
+ unsigned int, size_t);
+ efx_rc_t (*envo_partn_write)(efx_nic_t *, uint32_t,
+ unsigned int, caddr_t, size_t);
+ efx_rc_t (*envo_partn_rw_finish)(efx_nic_t *, uint32_t);
+ efx_rc_t (*envo_partn_get_version)(efx_nic_t *, uint32_t,
+ uint32_t *, uint16_t *);
+ efx_rc_t (*envo_partn_set_version)(efx_nic_t *, uint32_t,
+ uint16_t *);
+ efx_rc_t (*envo_buffer_validate)(efx_nic_t *, uint32_t,
+ caddr_t, size_t);
+} efx_nvram_ops_t;
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+typedef struct efx_vpd_ops_s {
+ efx_rc_t (*evpdo_init)(efx_nic_t *);
+ efx_rc_t (*evpdo_size)(efx_nic_t *, size_t *);
+ efx_rc_t (*evpdo_read)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_verify)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_reinit)(efx_nic_t *, caddr_t, size_t);
+ efx_rc_t (*evpdo_get)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_set)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *);
+ efx_rc_t (*evpdo_next)(efx_nic_t *, caddr_t, size_t,
+ efx_vpd_value_t *, unsigned int *);
+ efx_rc_t (*evpdo_write)(efx_nic_t *, caddr_t, size_t);
+ void (*evpdo_fini)(efx_nic_t *);
+} efx_vpd_ops_t;
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *resultp);
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_LICENSING
+
+typedef struct efx_lic_ops_s {
+ efx_rc_t (*elo_update_licenses)(efx_nic_t *);
+ efx_rc_t (*elo_get_key_stats)(efx_nic_t *, efx_key_stats_t *);
+ efx_rc_t (*elo_app_state)(efx_nic_t *, uint64_t, boolean_t *);
+ efx_rc_t (*elo_get_id)(efx_nic_t *, size_t, uint32_t *,
+ size_t *, uint8_t *);
+ efx_rc_t (*elo_find_start)
+ (efx_nic_t *, caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_find_end)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *);
+ boolean_t (*elo_find_key)(efx_nic_t *, caddr_t, size_t,
+ uint32_t, uint32_t *, uint32_t *);
+ boolean_t (*elo_validate_key)(efx_nic_t *,
+ caddr_t, uint32_t);
+ efx_rc_t (*elo_read_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t, uint32_t,
+ caddr_t, size_t, uint32_t *);
+ efx_rc_t (*elo_write_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ caddr_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_delete_key)(efx_nic_t *,
+ caddr_t, size_t, uint32_t,
+ uint32_t, uint32_t, uint32_t *);
+ efx_rc_t (*elo_create_partition)(efx_nic_t *,
+ caddr_t, size_t);
+ efx_rc_t (*elo_finish_partition)(efx_nic_t *,
+ caddr_t, size_t);
+} efx_lic_ops_t;
+
+#endif
+
+typedef struct efx_drv_cfg_s {
+ uint32_t edc_min_vi_count;
+ uint32_t edc_max_vi_count;
+
+ uint32_t edc_max_piobuf_count;
+ uint32_t edc_pio_alloc_size;
+} efx_drv_cfg_t;
+
+struct efx_nic_s {
+ uint32_t en_magic;
+ efx_family_t en_family;
+ uint32_t en_features;
+ efsys_identifier_t *en_esip;
+ efsys_lock_t *en_eslp;
+ efsys_bar_t *en_esbp;
+ unsigned int en_mod_flags;
+ unsigned int en_reset_flags;
+ efx_nic_cfg_t en_nic_cfg;
+ efx_drv_cfg_t en_drv_cfg;
+ efx_port_t en_port;
+ efx_mon_t en_mon;
+ efx_intr_t en_intr;
+ uint32_t en_ev_qcount;
+ uint32_t en_rx_qcount;
+ uint32_t en_tx_qcount;
+ const efx_nic_ops_t *en_enop;
+ const efx_ev_ops_t *en_eevop;
+ const efx_tx_ops_t *en_etxop;
+ const efx_rx_ops_t *en_erxop;
+#if EFSYS_OPT_FILTER
+ efx_filter_t en_filter;
+ const efx_filter_ops_t *en_efop;
+#endif /* EFSYS_OPT_FILTER */
+#if EFSYS_OPT_MCDI
+ efx_mcdi_t en_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+#if EFSYS_OPT_NVRAM
+ efx_nvram_type_t en_nvram_locked;
+ const efx_nvram_ops_t *en_envop;
+#endif /* EFSYS_OPT_NVRAM */
+#if EFSYS_OPT_VPD
+ const efx_vpd_ops_t *en_evpdop;
+#endif /* EFSYS_OPT_VPD */
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_hash_support_t en_hash_support;
+ efx_rx_scale_support_t en_rss_support;
+ uint32_t en_rss_context;
+#endif /* EFSYS_OPT_RX_SCALE */
+ uint32_t en_vport_id;
+#if EFSYS_OPT_LICENSING
+ const efx_lic_ops_t *en_elop;
+ boolean_t en_licensing_supported;
+#endif
+ union {
+#if EFSYS_OPT_SIENA
+ struct {
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+ unsigned int enu_partn_mask;
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
+#if EFSYS_OPT_VPD
+ caddr_t enu_svpd;
+ size_t enu_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ int enu_unused;
+ } siena;
+#endif /* EFSYS_OPT_SIENA */
+ int enu_unused;
+ } en_u;
+#if (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD)
+ union en_arch {
+ struct {
+ int ena_vi_base;
+ int ena_vi_count;
+ int ena_vi_shift;
+#if EFSYS_OPT_VPD
+ caddr_t ena_svpd;
+ size_t ena_svpd_length;
+#endif /* EFSYS_OPT_VPD */
+ efx_piobuf_handle_t ena_piobuf_handle[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_piobuf_count;
+ uint32_t ena_pio_alloc_map[EF10_MAX_PIOBUF_NBUFS];
+ uint32_t ena_pio_write_vi_base;
+ /* Memory BAR mapping regions */
+ uint32_t ena_uc_mem_map_offset;
+ size_t ena_uc_mem_map_size;
+ uint32_t ena_wc_mem_map_offset;
+ size_t ena_wc_mem_map_size;
+ } ef10;
+ } en_arch;
+#endif /* (EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD) */
+};
+
+
+#define EFX_NIC_MAGIC 0x02121996
+
+typedef boolean_t (*efx_ev_handler_t)(efx_evq_t *, efx_qword_t *,
+ const efx_ev_callbacks_t *, void *);
+
+typedef struct efx_evq_rxq_state_s {
+ unsigned int eers_rx_read_ptr;
+ unsigned int eers_rx_mask;
+#if EFSYS_OPT_RX_PACKED_STREAM
+ unsigned int eers_rx_stream_npackets;
+ boolean_t eers_rx_packed_stream;
+ unsigned int eers_rx_packed_stream_credits;
+#endif
+} efx_evq_rxq_state_t;
+
+struct efx_evq_s {
+ uint32_t ee_magic;
+ efx_nic_t *ee_enp;
+ unsigned int ee_index;
+ unsigned int ee_mask;
+ efsys_mem_t *ee_esmp;
+#if EFSYS_OPT_QSTATS
+ uint32_t ee_stat[EV_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+
+ efx_ev_handler_t ee_rx;
+ efx_ev_handler_t ee_tx;
+ efx_ev_handler_t ee_driver;
+ efx_ev_handler_t ee_global;
+ efx_ev_handler_t ee_drv_gen;
+#if EFSYS_OPT_MCDI
+ efx_ev_handler_t ee_mcdi;
+#endif /* EFSYS_OPT_MCDI */
+
+ efx_evq_rxq_state_t ee_rxq_state[EFX_EV_RX_NLABELS];
+
+ uint32_t ee_flags;
+};
+
+#define EFX_EVQ_MAGIC 0x08081997
+
+#define EFX_EVQ_SIENA_TIMER_QUANTUM_NS 6144 /* 768 cycles */
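+/* i.e. 6144 ns / 768 cycles = 8 ns per timer cycle */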
+
+struct efx_rxq_s {
+ uint32_t er_magic;
+ efx_nic_t *er_enp;
+ efx_evq_t *er_eep;
+ unsigned int er_index;
+ unsigned int er_label;
+ unsigned int er_mask;
+ efsys_mem_t *er_esmp;
+};
+
+#define EFX_RXQ_MAGIC 0x15022005
+
+struct efx_txq_s {
+ uint32_t et_magic;
+ efx_nic_t *et_enp;
+ unsigned int et_index;
+ unsigned int et_mask;
+ efsys_mem_t *et_esmp;
+#if EFSYS_OPT_HUNTINGTON
+ uint32_t et_pio_bufnum;
+ uint32_t et_pio_blknum;
+ uint32_t et_pio_write_offset;
+ uint32_t et_pio_offset;
+ size_t et_pio_size;
+#endif
+#if EFSYS_OPT_QSTATS
+ uint32_t et_stat[TX_NQSTATS];
+#endif /* EFSYS_OPT_QSTATS */
+};
+
+#define EFX_TXQ_MAGIC 0x05092005
+
+#define EFX_MAC_ADDR_COPY(_dst, _src) \
+ do { \
+ (_dst)[0] = (_src)[0]; \
+ (_dst)[1] = (_src)[1]; \
+ (_dst)[2] = (_src)[2]; \
+ (_dst)[3] = (_src)[3]; \
+ (_dst)[4] = (_src)[4]; \
+ (_dst)[5] = (_src)[5]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_MAC_BROADCAST_ADDR_SET(_dst) \
+ do { \
+ uint16_t *_d = (uint16_t *)(_dst); \
+ _d[0] = 0xffff; \
+ _d[1] = 0xffff; \
+ _d[2] = 0xffff; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_OPT_CHECK_REG
+#define EFX_CHECK_REG(_enp, _reg) \
+ do { \
+ const char *name = #_reg; \
+ char min = name[4]; \
+ char max = name[5]; \
+ char rev; \
+ \
+ switch ((_enp)->en_family) { \
+ case EFX_FAMILY_SIENA: \
+ rev = 'C'; \
+ break; \
+ \
+ case EFX_FAMILY_HUNTINGTON: \
+ rev = 'D'; \
+ break; \
+ \
+ case EFX_FAMILY_MEDFORD: \
+ rev = 'E'; \
+ break; \
+ \
+ default: \
+ rev = '?'; \
+ break; \
+ } \
+ \
+ EFSYS_ASSERT3S(rev, >=, min); \
+ EFSYS_ASSERT3S(rev, <=, max); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_CHECK_REG(_enp, _reg) do { \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#endif
+
+#define EFX_BAR_READD(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ EFSYS_PROBE3(efx_bar_readd, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITED(_enp, _reg, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE3(efx_bar_writed, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, _reg ## _OFST, \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ EFSYS_PROBE4(efx_bar_readq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEQ(_enp, _reg, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_writeq, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, _reg ## _OFST, \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_READO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ EFSYS_PROBE6(efx_bar_reado, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_WRITEO(_enp, _reg, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE6(efx_bar_writeo, const char *, #_reg, \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, _reg ## _OFST, \
+ (_eop), B_TRUE); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READD(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READD((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ EFSYS_PROBE4(efx_bar_tbl_readd, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED2(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (2 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITED3(_enp, _reg, _index, _edp, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE4(efx_bar_tbl_writed, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_edp)->ed_u32[0]); \
+ EFSYS_BAR_WRITED((_enp)->en_esbp, \
+ (_reg ## _OFST + \
+ (3 * sizeof (efx_dword_t)) + \
+ ((_index) * _reg ## _STEP)), \
+ (_edp), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ EFSYS_PROBE5(efx_bar_tbl_readq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEQ(_enp, _reg, _index, _eqp) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE5(efx_bar_tbl_writeq, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ EFSYS_BAR_WRITEQ((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eqp)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_READO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_BAR_READO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ EFSYS_PROBE7(efx_bar_tbl_reado, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_BAR_TBL_WRITEO(_enp, _reg, _index, _eop, _lock) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_writeo, const char *, #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop), (_lock)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/*
+ * Allow drivers to perform optimised 128-bit doorbell writes.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * special-cased in the BIU on the Falcon/Siena and EF10 architectures to avoid
+ * the need for locking in the host, and are the only ones known to be safe to
+ * use 128-bites write with.
+ */
+#define EFX_BAR_TBL_DOORBELL_WRITEO(_enp, _reg, _index, _eop) \
+ do { \
+ EFX_CHECK_REG((_enp), (_reg)); \
+ EFSYS_PROBE7(efx_bar_tbl_doorbell_writeo, \
+ const char *, \
+ #_reg, \
+ uint32_t, (_index), \
+ uint32_t, _reg ## _OFST, \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ EFSYS_BAR_DOORBELL_WRITEO((_enp)->en_esbp, \
+ (_reg ## _OFST + ((_index) * _reg ## _STEP)), \
+ (_eop)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
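+
+/*
+ * Illustrative sketch (assuming the TX descriptor-update register name from
+ * the EF10 register definitions): a TX doorbell push for queue et_index
+ * could be issued as
+ *
+ *     EFX_BAR_TBL_DOORBELL_WRITEO(enp, ER_DZ_TX_DESC_UPD_REG,
+ *         etp->et_index, &oword);
+ *
+ * so that the whole 128-bit descriptor pointer update reaches the NIC in a
+ * single write.
+ */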
+
+#define EFX_DMA_SYNC_QUEUE_FOR_DEVICE(_esmp, _entries, _wptr, _owptr) \
+ do { \
+ unsigned int _new = (_wptr); \
+ unsigned int _old = (_owptr); \
+ \
+ if ((_new) >= (_old)) \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ (_old) * sizeof (efx_desc_t), \
+ ((_new) - (_old)) * sizeof (efx_desc_t)); \
+ else \
+ /* \
+ * It is cheaper to sync the entire map than to sync \
+ * two parts, especially when offset/size are \
+ * ignored and the entire map is synced anyway. \
+ */ \
+ EFSYS_DMA_SYNC_FOR_DEVICE((_esmp), \
+ 0, \
+ (_entries) * sizeof (efx_desc_t)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
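+
+/*
+ * Illustrative use (a sketch, not a definitive call site): after a TX queue
+ * producer advances from old_wptr to new_wptr (both caller-maintained), only
+ * the freshly written descriptors need to be synced:
+ *
+ *     EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ *         new_wptr, old_wptr);
+ *
+ * When the write pointer has wrapped, the whole mapping is synced instead,
+ * as noted in the macro above.
+ */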
+
+extern __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp);
+
+extern void
+efx_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high);
+
+extern __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp);
+
+extern void
+efx_phy_unprobe(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_VPD
+
+/* VPD utility functions */
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keyword,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+#endif /* EFSYS_OPT_VPD */
+
+#if EFSYS_OPT_DIAG
+
+extern efx_sram_pattern_fn_t __efx_sram_pattern_fns[];
+
+typedef struct efx_register_set_s {
+ unsigned int address;
+ unsigned int step;
+ unsigned int rows;
+ efx_oword_t mask;
+} efx_register_set_t;
+
+extern __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count);
+
+extern __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_MAC_STATS
+
+/*
+ * Closed range of stats (i.e. the first and the last are included).
+ * The last must be greater or equal (if the range is one item only) to
+ * the first.
+ */
+struct efx_mac_stats_range {
+ efx_mac_stat_t first;
+ efx_mac_stat_t last;
+};
+
+extern efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count);
+
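+/*
+ * A usage sketch (illustrative; the stat identifiers below are examples
+ * only, standing in for members of the efx_mac_stat_t enumeration): a MAC
+ * implementation can enable several contiguous groups of statistics in one
+ * call:
+ *
+ *     static const struct efx_mac_stats_range ranges[] = {
+ *         { EFX_MAC_RX_OCTETS, EFX_MAC_RX_PKTS },
+ *         { EFX_MAC_TX_OCTETS, EFX_MAC_TX_PKTS },
+ *     };
+ *
+ *     rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ *         ranges, sizeof (ranges) / sizeof (ranges[0]));
+ */
+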
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_IMPL_H */
diff --git a/drivers/net/sfc/base/efx_intr.c b/drivers/net/sfc/base/efx_intr.c
new file mode 100644
index 00000000..f0422d53
--- /dev/null
+++ b/drivers/net/sfc/base/efx_intr.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp);
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level);
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp);
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp);
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp);
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp);
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp);
+
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_intr_ops_t __efx_intr_siena_ops = {
+ siena_intr_init, /* eio_init */
+ siena_intr_enable, /* eio_enable */
+ siena_intr_disable, /* eio_disable */
+ siena_intr_disable_unlocked, /* eio_disable_unlocked */
+ siena_intr_trigger, /* eio_trigger */
+ siena_intr_status_line, /* eio_status_line */
+ siena_intr_status_message, /* eio_status_message */
+ siena_intr_fatal, /* eio_fatal */
+ siena_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_intr_ops_t __efx_intr_ef10_ops = {
+ ef10_intr_init, /* eio_init */
+ ef10_intr_enable, /* eio_enable */
+ ef10_intr_disable, /* eio_disable */
+ ef10_intr_disable_unlocked, /* eio_disable_unlocked */
+ ef10_intr_trigger, /* eio_trigger */
+ ef10_intr_status_line, /* eio_status_line */
+ ef10_intr_status_message, /* eio_status_message */
+ ef10_intr_fatal, /* eio_fatal */
+ ef10_intr_fini, /* eio_fini */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_INTR) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ eip->ei_esmp = esmp;
+ eip->ei_type = type;
+ eip->ei_level = 0;
+
+ enp->en_mod_flags |= EFX_MOD_INTR;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ eiop = &__efx_intr_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ eiop = &__efx_intr_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ if ((rc = eiop->eio_init(enp, type, esmp)) != 0)
+ goto fail3;
+
+ eip->ei_eiop = eiop;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_INTR;
+}
+
+ void
+efx_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_enable(enp);
+}
+
+ void
+efx_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable(enp);
+}
+
+ void
+efx_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_disable_unlocked(enp);
+}
+
+
+ __checkReturn efx_rc_t
+efx_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ return (eiop->eio_trigger(enp, level));
+}
+
+ void
+efx_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_line(enp, fatalp, qmaskp);
+}
+
+ void
+efx_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_status_message(enp, message, fatalp);
+}
+
+ void
+efx_intr_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ const efx_intr_ops_t *eiop = eip->ei_eiop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ eiop->eio_fatal(enp);
+}
+
+
+/* ************************************************************************* */
+/* ************************************************************************* */
+/* ************************************************************************* */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_intr_init(
+ __in efx_nic_t *enp,
+ __in efx_intr_type_t type,
+ __in efsys_mem_t *esmp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ /*
+ * bug17213 workaround.
+ *
+ * Under legacy interrupts, don't share a level between fatal
+ * interrupts and event queue interrupts. Under MSI-X, they
+ * must share, or we won't get an interrupt.
+ */
+ if (enp->en_family == EFX_FAMILY_SIENA &&
+ eip->ei_type == EFX_INTR_LINE)
+ eip->ei_level = 0x1f;
+ else
+ eip->ei_level = 0;
+
+ /* Enable all the genuinely fatal interrupts */
+ EFX_SET_OWORD(oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_ILL_ADR_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RBUF_OWN_INT_KER_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TBUF_OWN_INT_KER_EN, 0);
+ if (enp->en_family >= EFX_FAMILY_SIENA)
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_FATAL_INTR_REG_KER, &oword);
+
+ /* Set up the interrupt address register */
+ EFX_POPULATE_OWORD_3(oword,
+ FRF_AZ_NORM_INT_VEC_DIS_KER, (type == EFX_INTR_MESSAGE) ? 1 : 0,
+ FRF_AZ_INT_ADR_KER_DW0, EFSYS_MEM_ADDR(esmp) & 0xffffffff,
+ FRF_AZ_INT_ADR_KER_DW1, EFSYS_MEM_ADDR(esmp) >> 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+
+ return (0);
+}
+
+static void
+siena_intr_enable(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+}
+
+static void
+siena_intr_disable(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ EFSYS_SPIN(10);
+}
+
+static void
+siena_intr_disable_unlocked(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFSYS_BAR_READO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_DRV_INT_EN_KER, 0);
+ EFSYS_BAR_WRITEO(enp->en_esbp, FR_AZ_INT_EN_REG_KER_OFST,
+ &oword, B_FALSE);
+}
+
+static __checkReturn efx_rc_t
+siena_intr_trigger(
+ __in efx_nic_t *enp,
+ __in unsigned int level)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_oword_t oword;
+ unsigned int count;
+ uint32_t sel;
+ efx_rc_t rc;
+
+ /* bug16757: No event queues can be initialized */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ if (level >= EFX_NINTR_SIENA) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (level > EFX_MASK32(FRF_AZ_KER_INT_LEVE_SEL))
+ return (ENOTSUP); /* avoid EFSYS_PROBE() */
+
+ sel = level;
+
+ /* Trigger a test interrupt */
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, sel);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ /*
+ * Wait up to 100ms for the interrupt to be raised before restoring
+ * KER_INT_LEVE_SEL. Ignore a failure to raise the interrupt (the caller
+ * will observe this soon enough anyway), but always restore KER_INT_LEVE_SEL.
+ */
+ count = 0;
+ do {
+ EFSYS_SPIN(100); /* 100us */
+
+ EFX_BAR_READO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+ } while (EFX_OWORD_FIELD(oword, FRF_AZ_KER_INT_KER) && ++count < 1000);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_KER_INT_LEVE_SEL, eip->ei_level);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_EN_REG_KER, &oword);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn boolean_t
+siena_intr_check_fatal(
+ __in efx_nic_t *enp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efsys_mem_t *esmp = eip->ei_esmp;
+ efx_oword_t oword;
+
+ /* Read the syndrome */
+ EFSYS_MEM_READO(esmp, 0, &oword);
+
+ if (EFX_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT) != 0) {
+ EFSYS_PROBE(fatal);
+
+ /* Clear the fatal interrupt condition */
+ EFX_SET_OWORD_FIELD(oword, FSF_AZ_NET_IVEC_FATAL_INT, 0);
+ EFSYS_MEM_WRITEO(esmp, 0, &oword);
+
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+static void
+siena_intr_status_line(
+ __in efx_nic_t *enp,
+ __out boolean_t *fatalp,
+ __out uint32_t *qmaskp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+ efx_dword_t dword;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ /*
+ * Read the queue mask and implicitly acknowledge the
+ * interrupt.
+ */
+ EFX_BAR_READD(enp, FR_BZ_INT_ISR0_REG, &dword, B_FALSE);
+ *qmaskp = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ EFSYS_PROBE1(qmask, uint32_t, *qmaskp);
+
+ if (*qmaskp & (1U << eip->ei_level))
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+static void
+siena_intr_status_message(
+ __in efx_nic_t *enp,
+ __in unsigned int message,
+ __out boolean_t *fatalp)
+{
+ efx_intr_t *eip = &(enp->en_intr);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
+
+ if (message == eip->ei_level)
+ *fatalp = siena_intr_check_fatal(enp);
+ else
+ *fatalp = B_FALSE;
+}
+
+
+static void
+siena_intr_fatal(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_DECODE_INTR_FATAL
+ efx_oword_t fatal;
+ efx_oword_t mem_per;
+
+ EFX_BAR_READO(enp, FR_AZ_FATAL_INTR_REG_KER, &fatal);
+ EFX_ZERO_OWORD(mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0 ||
+ EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFX_BAR_READO(enp, FR_AZ_MEM_STAT_REG, &mem_per);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRAM_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_BUFID_DC_OOB_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_BUFID_DC_OOB, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_MEM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_MEM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TBUF_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TBUF_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_RDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_RDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_TDESCQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_TDESQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVQ_OWN_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVQ_OWN, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_EVF_OFLO_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_EVFF_OFLO, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_ILL_ADR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_ILL_ADDR, 0, 0);
+
+ if (EFX_OWORD_FIELD(fatal, FRF_AZ_SRM_PERR_INT_KER) != 0)
+ EFSYS_ERR(enp->en_esip, EFX_ERR_SRAM_PERR,
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_0),
+ EFX_OWORD_FIELD(mem_per, EFX_DWORD_1));
+#else
+ EFSYS_ASSERT(0);
+#endif
+}
+
+static void
+siena_intr_fini(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /* Clear the interrupt address register */
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_WRITEO(enp, FR_AZ_INT_ADR_REG_KER, &oword);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_lic.c b/drivers/net/sfc/base/efx_lic.c
new file mode 100644
index 00000000..2cd05cc8
--- /dev/null
+++ b/drivers/net/sfc/base/efx_lic.c
@@ -0,0 +1,1751 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_LICENSING
+
+#include "ef10_tlv_layout.h"
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static const efx_lic_ops_t __efx_lic_v1_ops = {
+ efx_mcdi_fc_license_update_license, /* elo_update_licenses */
+ efx_mcdi_fc_license_get_key_stats, /* elo_get_key_stats */
+ NULL, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static const efx_lic_ops_t __efx_lic_v2_ops = {
+ efx_mcdi_licensing_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_get_key_stats, /* elo_get_key_stats */
+ efx_mcdi_licensed_app_state, /* elo_app_state */
+ NULL, /* elo_get_id */
+ efx_lic_v1v2_find_start, /* elo_find_start */
+ efx_lic_v1v2_find_end, /* elo_find_end */
+ efx_lic_v1v2_find_key, /* elo_find_key */
+ efx_lic_v1v2_validate_key, /* elo_validate_key */
+ efx_lic_v1v2_read_key, /* elo_read_key */
+ efx_lic_v1v2_write_key, /* elo_write_key */
+ efx_lic_v1v2_delete_key, /* elo_delete_key */
+ efx_lic_v1v2_create_partition, /* elo_create_partition */
+ efx_lic_v1v2_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp);
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp);
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ );
+
+static const efx_lic_ops_t __efx_lic_v3_ops = {
+ efx_mcdi_licensing_v3_update_licenses, /* elo_update_licenses */
+ efx_mcdi_licensing_v3_report_license, /* elo_get_key_stats */
+ efx_mcdi_licensing_v3_app_state, /* elo_app_state */
+ efx_mcdi_licensing_v3_get_id, /* elo_get_id */
+ efx_lic_v3_find_start, /* elo_find_start*/
+ efx_lic_v3_find_end, /* elo_find_end */
+ efx_lic_v3_find_key, /* elo_find_key */
+ efx_lic_v3_validate_key, /* elo_validate_key */
+ efx_lic_v3_read_key, /* elo_read_key */
+ efx_lic_v3_write_key, /* elo_write_key */
+ efx_lic_v3_delete_key, /* elo_delete_key */
+ efx_lic_v3_create_partition, /* elo_create_partition */
+ efx_lic_v3_finish_partition, /* elo_finish_partition */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+/* V1 Licensing - used in Siena Modena only */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_update_license(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_FC_IN_LICENSE_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_fc_license_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_FC_IN_LICENSE_LEN,
+ MC_CMD_FC_OUT_LICENSE_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_FC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_FC_IN_LICENSE_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_FC_OUT_LICENSE_LEN;
+
+ MCDI_IN_SET_DWORD(req, FC_IN_CMD,
+ MC_CMD_FC_OP_LICENSE);
+
+ MCDI_IN_SET_DWORD(req, FC_IN_LICENSE_OP,
+ MC_CMD_FC_IN_LICENSE_GET_KEY_STATS);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_FC_OUT_LICENSE_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_INVALID_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, FC_OUT_LICENSE_BLACKLISTED_KEYS);
+ eksp->eks_unverifiable = 0;
+ eksp->eks_wrong_node = 0;
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+/* V1 and V2 Partition format - based on a 16-bit TLV format */
+
+#if EFSYS_OPT_SIENA | EFSYS_OPT_HUNTINGTON
+
+/*
+ * V1/V2 format - defined in SF-108542-TC section 4.2:
+ * Type (T): 16bit - revision/HMAC algorithm
+ * Length (L): 16bit - value length in bytes
+ * Value (V): L bytes - payload
+ */
+#define EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX (256)
+#define EFX_LICENSE_V1V2_HEADER_LENGTH (2 * sizeof(uint16_t))
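+
+/*
+ * Worked example (a sketch based on the format description above, not part
+ * of the original sources): a partition holding a single key with a 16-byte
+ * payload, followed by the all-zeroes terminator, would be laid out as:
+ *
+ *	offset  0: 54 00        Type   = 0x0054 (little-endian, example value)
+ *	offset  2: 10 00        Length = 16 payload bytes
+ *	offset  4: <16 payload bytes>
+ *	offset 20: 00 00 00 00  terminator (Type == 0 and Length == 0)
+ *
+ * The record length reported by efx_lic_v1v2_find_key() below is the payload
+ * length plus the 4-byte header.
+ */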
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *startp = 0;
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ *endp = offset + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ return (0);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ boolean_t found;
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((size_t)buffer_size - offset < EFX_LICENSE_V1V2_HEADER_LENGTH)
+ goto fail1;
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)&bufferp[offset])[1]);
+ if ((tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) ||
+ (tlv_type == 0 && tlv_length == 0)) {
+ found = B_FALSE;
+ } else {
+ *startp = offset;
+ *lengthp = tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH;
+ found = B_TRUE;
+ }
+ return (found);
+
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v1v2_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ uint16_t tlv_type;
+ uint16_t tlv_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V1V2_HEADER_LENGTH) {
+ goto fail1;
+ }
+
+ tlv_type = __LE_TO_CPU_16(((uint16_t *)keyp)[0]);
+ tlv_length = __LE_TO_CPU_16(((uint16_t *)keyp)[1]);
+
+ if (tlv_length > EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX) {
+ goto fail2;
+ }
+ if (tlv_type == 0) {
+ goto fail3;
+ }
+ if ((tlv_length + EFX_LICENSE_V1V2_HEADER_LENGTH) != length) {
+ goto fail4;
+ }
+
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ if (key_max_size < length) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+ memcpy(keyp, &bufferp[offset], length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= (EFX_LICENSE_V1V2_PAYLOAD_LENGTH_MAX +
+ EFX_LICENSE_V1V2_HEADER_LENGTH));
+
+ /* Ensure space for terminator remains */
+ if ((offset + length) >
+ (buffer_size - EFX_LICENSE_V1V2_HEADER_LENGTH)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ memcpy(bufferp + offset, keyp, length);
+
+ *lengthp = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ uint32_t move_start = offset + length;
+ uint32_t move_length = end - move_start;
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(end <= buffer_size);
+
+ /* Shift everything after the key down */
+ memmove(bufferp + offset, bufferp + move_start, move_length);
+
+ *deltap = length;
+
+ return (0);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(EFX_LICENSE_V1V2_HEADER_LENGTH <= buffer_size);
+
+ /* Write terminator */
+ memset(bufferp, '\0', EFX_LICENSE_V1V2_HEADER_LENGTH);
+ return (0);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_v1v2_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ _NOTE(ARGUNUSED(enp, bufferp, buffer_size))
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON | EFSYS_OPT_SIENA */
+
+
+/* V2 Licensing - used by Huntington family only. See SF-113611-TC */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensed_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ /* V2 licensing supports 32bit app id only */
+ if ((app_id >> 32) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_APP_STATE_IN_APP_ID,
+ app_id & 0xffffffff);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail3;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used != 0) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_IN_LEN,
+ MC_CMD_LICENSING_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_HUNTINGTON);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_IN_OP,
+ MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_VALID_APP_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_INVALID_APP_KEYS);
+ eksp->eks_blacklisted =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_BLACKLISTED_APP_KEYS);
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_UNVERIFIABLE_APP_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_OUT_WRONG_NODE_APP_KEYS);
+ eksp->eks_licensed_apps_lo = 0;
+ eksp->eks_licensed_apps_hi = 0;
+ eksp->eks_licensed_features_lo = 0;
+ eksp->eks_licensed_features_hi = 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+/* V3 Licensing - used starting from Medford family. See SF-114884-SW */
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_update_licenses(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_LICENSING_V3_IN_LEN];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_report_license(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_V3_IN_LEN,
+ MC_CMD_LICENSING_V3_OUT_LEN)];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LICENSING_V3;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LICENSING_V3_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LICENSING_V3_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LICENSING_V3_IN_OP,
+ MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_V3_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ eksp->eks_valid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_VALID_KEYS);
+ eksp->eks_invalid =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_INVALID_KEYS);
+ eksp->eks_blacklisted = 0;
+ eksp->eks_unverifiable =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_UNVERIFIABLE_KEYS);
+ eksp->eks_wrong_node =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_WRONG_NODE_KEYS);
+ eksp->eks_licensed_apps_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_LO);
+ eksp->eks_licensed_apps_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_APPS_HI);
+ eksp->eks_licensed_features_lo =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_LO);
+ eksp->eks_licensed_features_hi =
+ MCDI_OUT_DWORD(req, LICENSING_V3_OUT_LICENSED_FEATURES_HI);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN,
+ MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)];
+ uint32_t app_state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_MEDFORD);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LICENSED_V3_APP_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO,
+ app_id & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI,
+ app_id >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ app_state = (MCDI_OUT_DWORD(req, GET_LICENSED_V3_APP_STATE_OUT_STATE));
+ if (app_state != MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED) {
+ *licensedp = B_TRUE;
+ } else {
+ *licensedp = B_FALSE;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_licensing_v3_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_bcount_part_opt(buffer_size, *lengthp)
+ uint8_t *bufferp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LICENSING_GET_ID_V3_IN_LEN,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ req.emr_cmd = MC_CMD_LICENSING_GET_ID_V3;
+
+ if (bufferp == NULL) {
+ /* Request id type and length only */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ (void) memset(payload, 0, sizeof (payload));
+ } else {
+ /* Request full buffer */
+ req.emr_in_buf = bufferp;
+ req.emr_in_length = MC_CMD_LICENSING_GET_ID_V3_IN_LEN;
+ req.emr_out_buf = bufferp;
+ req.emr_out_length = MIN(buffer_size, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
+ (void) memset(bufferp, 0, req.emr_out_length);
+ }
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *typep = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_TYPE);
+ *lengthp = MCDI_OUT_DWORD(req, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
+
+ if (bufferp == NULL) {
+		/*
+		 * Modify the reported length to indicate to the caller the
+		 * extra buffering needed to read the complete output.
+		 */
+ *lengthp += MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN;
+ } else {
+ /* Shift ID down to start of buffer */
+ memmove(bufferp,
+ bufferp + MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST,
+ *lengthp);
+ memset(bufferp + (*lengthp), 0,
+ MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* V3 format uses Huntington TLV format partition. See SF-108797-SW */
+#define EFX_LICENSE_V3_KEY_LENGTH_MIN (64)
+#define EFX_LICENSE_V3_KEY_LENGTH_MAX (160)
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item_start(bufferp, buffer_size, startp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_end(bufferp, buffer_size, offset, endp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_find_item(bufferp, buffer_size,
+ offset, startp, lengthp);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_v3_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ /* Check key is a valid V3 key */
+ uint8_t key_type;
+ uint8_t key_length;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (length < EFX_LICENSE_V3_KEY_LENGTH_MIN) {
+ goto fail1;
+ }
+
+ if (length > EFX_LICENSE_V3_KEY_LENGTH_MAX) {
+ goto fail2;
+ }
+
+ key_type = ((uint8_t *)keyp)[0];
+ key_length = ((uint8_t *)keyp)[1];
+
+ if (key_type < 3) {
+ goto fail3;
+ }
+ if (key_length > length) {
+ goto fail4;
+ }
+ return (B_TRUE);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE(fail1);
+
+ return (B_FALSE);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+
+ return ef10_nvram_buffer_get_item(bufferp, buffer_size,
+ offset, length, keyp, key_max_size, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT(length <= EFX_LICENSE_V3_KEY_LENGTH_MAX);
+
+ return ef10_nvram_buffer_insert_item(bufferp, buffer_size,
+ offset, keyp, length, lengthp);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = ef10_nvram_buffer_delete_item(bufferp,
+ buffer_size, offset, length, end)) != 0) {
+ goto fail1;
+ }
+
+ *deltap = length;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ /* Construct empty partition */
+ if ((rc = ef10_nvram_buffer_create(enp,
+ NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_v3_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ efx_rc_t rc;
+
+ if ((rc = ef10_nvram_buffer_finish(bufferp,
+ buffer_size)) != 0) {
+ goto fail1;
+ }
+
+ /* Validate completed partition */
+ if ((rc = ef10_nvram_buffer_validate(enp, NVRAM_PARTITION_TYPE_LICENSE,
+ bufferp, buffer_size)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_lic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop;
+ efx_key_stats_t eks;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_LIC));
+
+ switch (enp->en_family) {
+
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ elop = &__efx_lic_v1_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ elop = &__efx_lic_v2_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ elop = &__efx_lic_v3_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_elop = elop;
+ enp->en_mod_flags |= EFX_MOD_LIC;
+
+ /* Probe for support */
+ if (efx_lic_get_key_stats(enp, &eks) == 0) {
+ enp->en_licensing_supported = B_TRUE;
+ } else {
+ enp->en_licensing_supported = B_FALSE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+extern __checkReturn boolean_t
+efx_lic_check_support(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ return enp->en_licensing_supported;
+}
+
+ void
+efx_lic_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ enp->en_elop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_LIC;
+}
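+
+/*
+ * A minimal caller-side sketch (an illustration, not part of the original
+ * sources), assuming the NIC has already been probed:
+ *
+ *	efx_key_stats_t eks;
+ *
+ *	if (efx_lic_init(enp) != 0)
+ *		return;
+ *	if (efx_lic_check_support(enp)) {
+ *		(void) efx_lic_update_licenses(enp);
+ *		(void) efx_lic_get_key_stats(enp, &eks);
+ *	}
+ *	efx_lic_fini(enp);
+ */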
+
+
+ __checkReturn efx_rc_t
+efx_lic_update_licenses(
+ __in efx_nic_t *enp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_update_licenses(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_key_stats(
+ __in efx_nic_t *enp,
+ __out efx_key_stats_t *eksp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_get_key_stats(enp, eksp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_app_state(
+ __in efx_nic_t *enp,
+ __in uint64_t app_id,
+ __out boolean_t *licensedp)
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_app_state == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_app_state(enp, app_id, licensedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_get_id(
+ __in efx_nic_t *enp,
+ __in size_t buffer_size,
+ __out uint32_t *typep,
+ __out size_t *lengthp,
+ __out_opt uint8_t *bufferp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if (elop->elo_get_id == NULL)
+ return (ENOTSUP);
+
+ if ((rc = elop->elo_get_id(enp, buffer_size, typep,
+ lengthp, bufferp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
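+
+/*
+ * Callers that do not know the ID length in advance can use a two-call
+ * pattern (a sketch, not part of the original sources): the first call with
+ * a NULL buffer reports the buffer size required, the second reads the ID.
+ *
+ *	uint32_t type;
+ *	size_t size, length;
+ *	uint8_t *idp;
+ *
+ *	if (efx_lic_get_id(enp, 0, &type, &size, NULL) != 0)
+ *		return;
+ *	EFSYS_KMEM_ALLOC(enp->en_esip, size, idp);
+ *	if (idp == NULL)
+ *		return;
+ *	if (efx_lic_get_id(enp, size, &type, &length, idp) == 0)
+ *		... use idp[0 .. length - 1] ...
+ *	EFSYS_KMEM_FREE(enp->en_esip, size, idp);
+ */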
+
+/* Buffer management API - abstracts varying TLV format used for License partition */
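+
+/*
+ * A sketch of iterating over the keys in a partition image with this API
+ * (not part of the original sources); buffer/buffer_size are assumed to hold
+ * the raw LICENSE partition contents:
+ *
+ *	uint32_t offset, start, length;
+ *
+ *	if (efx_lic_find_start(enp, buffer, buffer_size, &offset) != 0)
+ *		return;
+ *	while (efx_lic_find_key(enp, buffer, buffer_size, offset,
+ *	    &start, &length)) {
+ *		... process the key at buffer[start .. start + length - 1] ...
+ *		offset = start + length;
+ *	}
+ */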
+
+ __checkReturn efx_rc_t
+efx_lic_find_start(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __out uint32_t *startp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_start(enp, bufferp, buffer_size, startp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_find_end(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *endp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_find_end(enp, bufferp, buffer_size, offset, endp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_find_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __out uint32_t *startp,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ EFSYS_ASSERT(bufferp);
+ EFSYS_ASSERT(startp);
+ EFSYS_ASSERT(lengthp);
+
+ return (elop->elo_find_key(enp, bufferp, buffer_size, offset,
+ startp, lengthp));
+}
+
+
+/*
+ * Validate that the buffer contains a single key in a recognised format.
+ * An empty or terminator buffer is not accepted as a valid key.
+ */
+ __checkReturn __success(return != B_FALSE) boolean_t
+efx_lic_validate_key(
+ __in efx_nic_t *enp,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ boolean_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_validate_key(enp, keyp, length)) == B_FALSE)
+ goto fail1;
+
+ return (B_TRUE);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_read_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __out_bcount_part(key_max_size, *lengthp)
+ caddr_t keyp,
+ __in size_t key_max_size,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_read_key(enp, bufferp, buffer_size, offset,
+ length, keyp, key_max_size, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_write_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in_bcount(length) caddr_t keyp,
+ __in uint32_t length,
+ __out uint32_t *lengthp
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_write_key(enp, bufferp, buffer_size, offset,
+ keyp, length, lengthp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_delete_key(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size,
+ __in uint32_t offset,
+ __in uint32_t length,
+ __in uint32_t end,
+ __out uint32_t *deltap
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_delete_key(enp, bufferp, buffer_size, offset,
+ length, end, deltap)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_lic_create_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_create_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_lic_finish_partition(
+ __in efx_nic_t *enp,
+ __in_bcount(buffer_size)
+ caddr_t bufferp,
+ __in size_t buffer_size
+ )
+{
+ const efx_lic_ops_t *elop = enp->en_elop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_LIC);
+
+ if ((rc = elop->elo_finish_partition(enp, bufferp, buffer_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LICENSING */
diff --git a/drivers/net/sfc/base/efx_mac.c b/drivers/net/sfc/base/efx_mac.c
new file mode 100644
index 00000000..752e7205
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mac.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_SIENA
+static const efx_mac_ops_t __efx_siena_mac_ops = {
+ siena_mac_poll, /* emo_poll */
+ siena_mac_up, /* emo_up */
+ siena_mac_reconfigure, /* emo_addr_set */
+ siena_mac_reconfigure, /* emo_pdu_set */
+ siena_mac_pdu_get, /* emo_pdu_get */
+ siena_mac_reconfigure, /* emo_reconfigure */
+ siena_mac_multicast_list_set, /* emo_multicast_list_set */
+	NULL,					/* emo_filter_default_rxq_set */
+ NULL, /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ siena_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ siena_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ siena_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_mac_ops_t __efx_ef10_mac_ops = {
+ ef10_mac_poll, /* emo_poll */
+ ef10_mac_up, /* emo_up */
+ ef10_mac_addr_set, /* emo_addr_set */
+ ef10_mac_pdu_set, /* emo_pdu_set */
+ ef10_mac_pdu_get, /* emo_pdu_get */
+ ef10_mac_reconfigure, /* emo_reconfigure */
+ ef10_mac_multicast_list_set, /* emo_multicast_list_set */
+ ef10_mac_filter_default_rxq_set, /* emo_filter_default_rxq_set */
+ ef10_mac_filter_default_rxq_clear,
+ /* emo_filter_default_rxq_clear */
+#if EFSYS_OPT_LOOPBACK
+ ef10_mac_loopback_set, /* emo_loopback_set */
+#endif /* EFSYS_OPT_LOOPBACK */
+#if EFSYS_OPT_MAC_STATS
+ ef10_mac_stats_get_mask, /* emo_stats_get_mask */
+ efx_mcdi_mac_stats_clear, /* emo_stats_clear */
+ efx_mcdi_mac_stats_upload, /* emo_stats_upload */
+ efx_mcdi_mac_stats_periodic, /* emo_stats_periodic */
+ ef10_mac_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MAC_STATS */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_set(
+ __in efx_nic_t *enp,
+ __in size_t pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint32_t old_pdu;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (pdu < EFX_MAC_PDU_MIN) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (pdu > EFX_MAC_PDU_MAX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ old_pdu = epp->ep_mac_pdu;
+ epp->ep_mac_pdu = (uint32_t)pdu;
+ if ((rc = emop->emo_pdu_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ epp->ep_mac_pdu = old_pdu;
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ if ((rc = emop->emo_pdu_get(enp, pdu)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_addr_set(
+ __in efx_nic_t *enp,
+ __in uint8_t *addr)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t old_addr[6];
+ uint32_t oui;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (EFX_MAC_ADDR_IS_MULTICAST(addr)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ oui = addr[0] << 16 | addr[1] << 8 | addr[2];
+ if (oui == 0x000000) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ EFX_MAC_ADDR_COPY(old_addr, epp->ep_mac_addr);
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, addr);
+ if ((rc = emop->emo_addr_set(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFX_MAC_ADDR_COPY(epp->ep_mac_addr, old_addr);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_set(
+ __in efx_nic_t *enp,
+ __in boolean_t all_unicst,
+ __in boolean_t mulcst,
+ __in boolean_t all_mulcst,
+ __in boolean_t brdcst)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ boolean_t old_all_unicst;
+ boolean_t old_mulcst;
+ boolean_t old_all_mulcst;
+ boolean_t old_brdcst;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ old_all_unicst = epp->ep_all_unicst;
+ old_mulcst = epp->ep_mulcst;
+ old_all_mulcst = epp->ep_all_mulcst;
+ old_brdcst = epp->ep_brdcst;
+
+ epp->ep_all_unicst = all_unicst;
+ epp->ep_mulcst = mulcst;
+ epp->ep_all_mulcst = all_mulcst;
+ epp->ep_brdcst = brdcst;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_all_unicst = old_all_unicst;
+ epp->ep_mulcst = old_mulcst;
+ epp->ep_all_mulcst = old_all_mulcst;
+ epp->ep_brdcst = old_brdcst;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_drain(
+ __in efx_nic_t *enp,
+ __in boolean_t enabled)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if (epp->ep_mac_drain == enabled)
+ return (0);
+
+ epp->ep_mac_drain = enabled;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((rc = emop->emo_up(enp, mac_upp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_fcntl_set(
+ __in efx_nic_t *enp,
+ __in unsigned int fcntl,
+ __in boolean_t autoneg)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ unsigned int old_fcntl;
+ boolean_t old_autoneg;
+ unsigned int old_adv_cap;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((fcntl & ~(EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE)) != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /*
+ * Ignore a request to set flow control auto-negotiation
+ * if the PHY doesn't support it.
+ */
+ if (~epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ autoneg = B_FALSE;
+
+ old_fcntl = epp->ep_fcntl;
+ old_autoneg = epp->ep_fcntl_autoneg;
+ old_adv_cap = epp->ep_adv_cap_mask;
+
+ epp->ep_fcntl = fcntl;
+ epp->ep_fcntl_autoneg = autoneg;
+
+ /*
+ * Always encode the flow control settings in the advertised
+ * capabilities even if we are not trying to auto-negotiate
+ * them and reconfigure both the PHY and the MAC.
+ */
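+	/*
+	 * The encoding below works out as:
+	 *	RESPOND and GENERATE	-> PAUSE
+	 *	RESPOND only		-> PAUSE | ASYM
+	 *	GENERATE only		-> ASYM
+	 *	neither			-> no pause capabilities advertised
+	 */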
+ if (fcntl & EFX_FCNTL_RESPOND)
+ epp->ep_adv_cap_mask |= (1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+ else
+ epp->ep_adv_cap_mask &= ~(1 << EFX_PHY_CAP_PAUSE |
+ 1 << EFX_PHY_CAP_ASYM);
+
+ if (fcntl & EFX_FCNTL_GENERATE)
+ epp->ep_adv_cap_mask ^= (1 << EFX_PHY_CAP_ASYM);
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_fcntl = old_fcntl;
+ epp->ep_fcntl_autoneg = old_autoneg;
+ epp->ep_adv_cap_mask = old_adv_cap;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_fcntl_get(
+ __in efx_nic_t *enp,
+ __out unsigned int *fcntl_wantedp,
+ __out unsigned int *fcntl_linkp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int wanted = 0;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ /*
+ * Decode the requested flow control settings from the PHY
+ * advertised capabilities.
+ */
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_PAUSE))
+ wanted = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ if (epp->ep_adv_cap_mask & (1 << EFX_PHY_CAP_ASYM))
+ wanted ^= EFX_FCNTL_GENERATE;
+
+ *fcntl_linkp = epp->ep_fcntl;
+ *fcntl_wantedp = wanted;
+}
+
+ __checkReturn efx_rc_t
+efx_mac_multicast_list_set(
+ __in efx_nic_t *enp,
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ uint8_t *old_mulcst_addr_list = NULL;
+ uint32_t old_mulcst_addr_count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (count > EFX_MAC_MULTICAST_LIST_MAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ old_mulcst_addr_count = epp->ep_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ /* Allocate memory to store old list (instead of using stack) */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ if (old_mulcst_addr_list == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ /* Save the old list in case we need to rollback */
+ memcpy(old_mulcst_addr_list, epp->ep_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+ }
+
+ /* Store the new list */
+ memcpy(epp->ep_mulcst_addr_list, addrs,
+ count * EFX_MAC_ADDR_LEN);
+ epp->ep_mulcst_addr_count = count;
+
+ if ((rc = emop->emo_multicast_list_set(enp)) != 0)
+ goto fail3;
+
+ if (old_mulcst_addr_count > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ /* Restore original list on failure */
+ epp->ep_mulcst_addr_count = old_mulcst_addr_count;
+ if (old_mulcst_addr_count > 0) {
+ memcpy(epp->ep_mulcst_addr_list, old_mulcst_addr_list,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN);
+
+ EFSYS_KMEM_FREE(enp->en_esip,
+ old_mulcst_addr_count * EFX_MAC_ADDR_LEN,
+ old_mulcst_addr_list);
+ }
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_mac_filter_default_rxq_set(
+ __in efx_nic_t *enp,
+ __in efx_rxq_t *erp,
+ __in boolean_t using_rss)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_set != NULL) {
+ rc = emop->emo_filter_default_rxq_set(enp, erp, using_rss);
+ if (rc != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_mac_filter_default_rxq_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (emop->emo_filter_default_rxq_clear != NULL)
+ emop->emo_filter_default_rxq_clear(enp);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED EfxMacStatNamesBlock c11b91b42f922516 */
+static const char * const __efx_mac_stat_name[] = {
+ "rx_octets",
+ "rx_pkts",
+ "rx_unicst_pkts",
+ "rx_multicst_pkts",
+ "rx_brdcst_pkts",
+ "rx_pause_pkts",
+ "rx_le_64_pkts",
+ "rx_65_to_127_pkts",
+ "rx_128_to_255_pkts",
+ "rx_256_to_511_pkts",
+ "rx_512_to_1023_pkts",
+ "rx_1024_to_15xx_pkts",
+ "rx_ge_15xx_pkts",
+ "rx_errors",
+ "rx_fcs_errors",
+ "rx_drop_events",
+ "rx_false_carrier_errors",
+ "rx_symbol_errors",
+ "rx_align_errors",
+ "rx_internal_errors",
+ "rx_jabber_pkts",
+ "rx_lane0_char_err",
+ "rx_lane1_char_err",
+ "rx_lane2_char_err",
+ "rx_lane3_char_err",
+ "rx_lane0_disp_err",
+ "rx_lane1_disp_err",
+ "rx_lane2_disp_err",
+ "rx_lane3_disp_err",
+ "rx_match_fault",
+ "rx_nodesc_drop_cnt",
+ "tx_octets",
+ "tx_pkts",
+ "tx_unicst_pkts",
+ "tx_multicst_pkts",
+ "tx_brdcst_pkts",
+ "tx_pause_pkts",
+ "tx_le_64_pkts",
+ "tx_65_to_127_pkts",
+ "tx_128_to_255_pkts",
+ "tx_256_to_511_pkts",
+ "tx_512_to_1023_pkts",
+ "tx_1024_to_15xx_pkts",
+ "tx_ge_15xx_pkts",
+ "tx_errors",
+ "tx_sgl_col_pkts",
+ "tx_mult_col_pkts",
+ "tx_ex_col_pkts",
+ "tx_late_col_pkts",
+ "tx_def_pkts",
+ "tx_ex_def_pkts",
+ "pm_trunc_bb_overflow",
+ "pm_discard_bb_overflow",
+ "pm_trunc_vfifo_full",
+ "pm_discard_vfifo_full",
+ "pm_trunc_qbb",
+ "pm_discard_qbb",
+ "pm_discard_mapping",
+ "rxdp_q_disabled_pkts",
+ "rxdp_di_dropped_pkts",
+ "rxdp_streaming_pkts",
+ "rxdp_hlb_fetch",
+ "rxdp_hlb_wait",
+ "vadapter_rx_unicast_packets",
+ "vadapter_rx_unicast_bytes",
+ "vadapter_rx_multicast_packets",
+ "vadapter_rx_multicast_bytes",
+ "vadapter_rx_broadcast_packets",
+ "vadapter_rx_broadcast_bytes",
+ "vadapter_rx_bad_packets",
+ "vadapter_rx_bad_bytes",
+ "vadapter_rx_overflow",
+ "vadapter_tx_unicast_packets",
+ "vadapter_tx_unicast_bytes",
+ "vadapter_tx_multicast_packets",
+ "vadapter_tx_multicast_bytes",
+ "vadapter_tx_broadcast_packets",
+ "vadapter_tx_broadcast_bytes",
+ "vadapter_tx_bad_packets",
+ "vadapter_tx_bad_bytes",
+ "vadapter_tx_overflow",
+};
+/* END MKCONFIG GENERATED EfxMacStatNamesBlock */
+
+ __checkReturn const char *
+efx_mac_stat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MAC_NSTATS);
+ return (__efx_mac_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+static efx_rc_t
+efx_mac_stats_mask_add_range(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in const struct efx_mac_stats_range *rngp)
+{
+ unsigned int mask_npages = mask_size / sizeof (*maskp);
+ unsigned int el;
+ unsigned int el_min;
+ unsigned int el_max;
+ unsigned int low;
+ unsigned int high;
+ unsigned int width;
+ efx_rc_t rc;
+
+ if ((mask_npages * EFX_MAC_STATS_MASK_BITS_PER_PAGE) <=
+ (unsigned int)rngp->last) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(rngp->first, <=, rngp->last);
+ EFSYS_ASSERT3U(rngp->last, <, EFX_MAC_NSTATS);
+
+ for (el = 0; el < mask_npages; ++el) {
+ el_min = el * EFX_MAC_STATS_MASK_BITS_PER_PAGE;
+ el_max =
+ el_min + (EFX_MAC_STATS_MASK_BITS_PER_PAGE - 1);
+ if ((unsigned int)rngp->first > el_max ||
+ (unsigned int)rngp->last < el_min)
+ continue;
+ low = MAX((unsigned int)rngp->first, el_min);
+ high = MIN((unsigned int)rngp->last, el_max);
+ width = high - low + 1;
+ maskp[el] |=
+ (width == EFX_MAC_STATS_MASK_BITS_PER_PAGE) ?
+ (~0ULL) : (((1ULL << width) - 1) << (low - el_min));
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
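+
+/*
+ * Worked example, assuming EFX_MAC_STATS_MASK_BITS_PER_PAGE is 32 (one bit
+ * per statistic in each 32-bit mask element): a range with first == 3 and
+ * last == 68 sets
+ *
+ *	maskp[0] |= 0xfffffff8		bits  3..31
+ *	maskp[1] |= 0xffffffff		bits 32..63
+ *	maskp[2] |= 0x0000001f		bits 64..68
+ */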
+
+ efx_rc_t
+efx_mac_stats_mask_add_ranges(
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size,
+ __in_ecount(rng_count) const struct efx_mac_stats_range *rngp,
+ __in unsigned int rng_count)
+{
+ unsigned int i;
+ efx_rc_t rc;
+
+ for (i = 0; i < rng_count; ++i) {
+ if ((rc = efx_mac_stats_mask_add_range(maskp, mask_size,
+ &rngp[i])) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __out_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(maskp != NULL);
+ EFSYS_ASSERT(mask_size % sizeof (maskp[0]) == 0);
+
+ (void) memset(maskp, 0, mask_size);
+
+ if ((rc = emop->emo_stats_get_mask(enp, maskp, mask_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ if ((rc = emop->emo_stats_clear(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ /*
+ * Don't assert !ep_mac_stats_pending, because the client might
+ * have failed to finalise statistics when previously stopping
+ * the port.
+ */
+ if ((rc = emop->emo_stats_upload(enp, esmp)) != 0)
+ goto fail1;
+
+ epp->ep_mac_stats_pending = B_TRUE;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+
+ if (emop->emo_stats_periodic == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = emop->emo_stats_periodic(enp, esmp, period_ms, events)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *essp,
+ __inout_opt uint32_t *generationp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ rc = emop->emo_stats_update(enp, esmp, essp, generationp);
+ if (rc == 0)
+ epp->ep_mac_stats_pending = B_FALSE;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+efx_mac_select(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mac_type_t type = EFX_MAC_INVALID;
+ const efx_mac_ops_t *emop;
+ int rc = EINVAL;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emop = &__efx_siena_mac_ops;
+ type = EFX_MAC_SIENA;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_HUNTINGTON;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emop = &__efx_ef10_mac_ops;
+ type = EFX_MAC_MEDFORD;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT(type != EFX_MAC_INVALID);
+ EFSYS_ASSERT3U(type, <, EFX_MAC_NTYPES);
+ EFSYS_ASSERT(emop != NULL);
+
+ epp->ep_emop = emop;
+ epp->ep_mac_type = type;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_SIENA
+
+#define EFX_MAC_HASH_BITS (1 << 8)
+
+/* Compute the multicast hash as used on Falcon and Siena. */
+static void
+siena_mac_multicast_hash_compute(
+ __in_ecount(6*count) uint8_t const *addrs,
+ __in int count,
+ __out efx_oword_t *hash_low,
+ __out efx_oword_t *hash_high)
+{
+ uint32_t crc, index;
+ int i;
+
+ EFSYS_ASSERT(hash_low != NULL);
+ EFSYS_ASSERT(hash_high != NULL);
+
+ EFX_ZERO_OWORD(*hash_low);
+ EFX_ZERO_OWORD(*hash_high);
+
+ for (i = 0; i < count; i++) {
+ /* Calculate hash bucket (IEEE 802.3 CRC32 of the MAC addr) */
+ crc = efx_crc32_calculate(0xffffffff, addrs, EFX_MAC_ADDR_LEN);
+ index = crc % EFX_MAC_HASH_BITS;
+ if (index < 128) {
+ EFX_SET_OWORD_BIT(*hash_low, index);
+ } else {
+ EFX_SET_OWORD_BIT(*hash_high, index - 128);
+ }
+
+ addrs += EFX_MAC_ADDR_LEN;
+ }
+}
+
+static __checkReturn efx_rc_t
+siena_mac_multicast_list_set(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_oword_t old_hash[2];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ memcpy(old_hash, epp->ep_multicst_hash, sizeof (old_hash));
+
+ siena_mac_multicast_hash_compute(
+ epp->ep_mulcst_addr_list,
+ epp->ep_mulcst_addr_count,
+ &epp->ep_multicst_hash[0],
+ &epp->ep_multicst_hash[1]);
+
+ if ((rc = emop->emo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ memcpy(epp->ep_multicst_hash, old_hash, sizeof (old_hash));
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_mcdi.c b/drivers/net/sfc/base/efx_mcdi.c
new file mode 100644
index 00000000..c61b943c
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mcdi.c
@@ -0,0 +1,2346 @@
+/*
+ * Copyright (c) 2008-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MCDI
+
+/*
+ * There are three versions of the MCDI interface:
+ * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
+ * - MCDIv1: Siena firmware and Huntington BootROM.
+ * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
+ * Transport uses MCDIv2 headers.
+ *
+ * MCDIv2 Header NOT_EPOCH flag
+ * ----------------------------
+ * A new epoch begins at initial startup or after an MC reboot, and defines when
+ * the MC should reject stale MCDI requests.
+ *
+ * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
+ * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
+ *
+ * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
+ * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0.
+ */
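+
+/*
+ * Sketch of how the policy above maps onto this implementation:
+ * efx_mcdi_new_epoch() sets emi_new_epoch, so the next header built by
+ * efx_mcdi_request_start() carries NOT_EPOCH=0; once that request completes,
+ * efx_mcdi_request_poll() clears emi_new_epoch and subsequent requests carry
+ * NOT_EPOCH=1. efx_mcdi_ev_death() sets emi_new_epoch again when an MC
+ * reboot is detected.
+ */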
+
+
+
+#if EFSYS_OPT_SIENA
+
+static const efx_mcdi_ops_t __efx_mcdi_siena_ops = {
+ siena_mcdi_init, /* emco_init */
+ siena_mcdi_send_request, /* emco_send_request */
+ siena_mcdi_poll_reboot, /* emco_poll_reboot */
+ siena_mcdi_poll_response, /* emco_poll_response */
+ siena_mcdi_read_response, /* emco_read_response */
+ siena_mcdi_fini, /* emco_fini */
+ siena_mcdi_feature_supported, /* emco_feature_supported */
+ siena_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = {
+ ef10_mcdi_init, /* emco_init */
+ ef10_mcdi_send_request, /* emco_send_request */
+ ef10_mcdi_poll_reboot, /* emco_poll_reboot */
+ ef10_mcdi_poll_response, /* emco_poll_response */
+ ef10_mcdi_read_response, /* emco_read_response */
+ ef10_mcdi_fini, /* emco_fini */
+ ef10_mcdi_feature_supported, /* emco_feature_supported */
+ ef10_mcdi_get_timeout, /* emco_get_timeout */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *emtp)
+{
+ const efx_mcdi_ops_t *emcop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ emcop = &__efx_mcdi_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ emcop = &__efx_mcdi_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
+ /* MCDI requires a DMA buffer in host memory */
+ if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ }
+ enp->en_mcdi.em_emtp = emtp;
+
+ if (emcop != NULL && emcop->emco_init != NULL) {
+ if ((rc = emcop->emco_init(enp, emtp)) != 0)
+ goto fail3;
+ }
+
+ enp->en_mcdi.em_emcop = emcop;
+ enp->en_mod_flags |= EFX_MOD_MCDI;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mcdi.em_emtp = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+
+ return (rc);
+}
+
+ void
+efx_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);
+
+ if (emcop != NULL && emcop->emco_fini != NULL)
+ emcop->emco_fini(enp);
+
+ emip->emi_port = 0;
+ emip->emi_aborted = 0;
+
+ enp->en_mcdi.em_emcop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_MCDI;
+}
+
+ void
+efx_mcdi_new_epoch(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efsys_lock_state_t state;
+
+ /* Start a new epoch (allow fresh MCDI requests to succeed) */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emip->emi_new_epoch = B_TRUE;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+}
+
+static void
+efx_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in void *hdrp,
+ __in size_t hdr_len,
+ __in void *sdup,
+ __in size_t sdu_len)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
+}
+
+static efx_rc_t
+efx_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ rc = emcop->emco_poll_reboot(enp);
+ return (rc);
+}
+
+static boolean_t
+efx_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ boolean_t available;
+
+ available = emcop->emco_poll_response(enp);
+ return (available);
+}
+
+static void
+efx_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_read_response(enp, bufferp, offset, length);
+}
+
+ void
+efx_mcdi_request_start(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __in boolean_t ev_cpl)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ size_t hdr_len;
+ unsigned int max_version;
+ unsigned int seq;
+ unsigned int xflags;
+ boolean_t new_epoch;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_request_start() is naturally serialised against both
+ * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
+ * by virtue of there only being one outstanding MCDI request.
+ * Unfortunately, upper layers may also call efx_mcdi_request_abort()
+ * at any time, to time out a pending MCDI request. That request may
+ * then subsequently complete, meaning efx_mcdi_ev_cpl() or
+ * efx_mcdi_ev_death() may end up running in parallel with
+ * efx_mcdi_request_start(). This race is handled by ensuring that
+ * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
+ * en_eslp lock.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ EFSYS_ASSERT(emip->emi_pending_req == NULL);
+ emip->emi_pending_req = emrp;
+ emip->emi_ev_cpl = ev_cpl;
+ emip->emi_poll_cnt = 0;
+ seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
+ new_epoch = emip->emi_new_epoch;
+ max_version = emip->emi_max_version;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ xflags = 0;
+ if (ev_cpl)
+ xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+ /*
+ * Huntington firmware supports MCDIv2, but the Huntington BootROM only
+ * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
+ * possible to support this.
+ */
+ if ((max_version >= 2) &&
+ ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
+ (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
+ /* Construct MCDI v2 header */
+ hdr_len = sizeof (hdr);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
+ } else {
+ /* Construct MCDI v1 header */
+ hdr_len = sizeof (hdr[0]);
+ EFX_POPULATE_DWORD_8(hdr[0],
+ MCDI_HEADER_CODE, emrp->emr_cmd,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_DATALEN, emrp->emr_in_length,
+ MCDI_HEADER_SEQ, seq,
+ MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
+ MCDI_HEADER_ERROR, 0,
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_XFLAGS, xflags);
+ }
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
+ &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ efx_mcdi_send_request(enp, &hdr[0], hdr_len,
+ emrp->emr_in_buf, emrp->emr_in_length);
+}
+
+
+static void
+efx_mcdi_read_response_header(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ unsigned int data_len;
+ unsigned int seq;
+ unsigned int cmd;
+ unsigned int error;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emrp != NULL);
+
+ efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
+ hdr_len = sizeof (hdr[0]);
+
+ cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
+ seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
+ error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);
+
+ if (cmd != MC_CMD_V2_EXTN) {
+ data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
+ } else {
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
+ data_len =
+ EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ if (error && (data_len == 0)) {
+ /* The MC has rebooted since the request was sent. */
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ efx_mcdi_poll_reboot(enp);
+ rc = EIO;
+ goto fail1;
+ }
+ if ((cmd != emrp->emr_cmd) ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ /* Response is for a different request */
+ rc = EIO;
+ goto fail2;
+ }
+ if (error) {
+ efx_dword_t err[2];
+ unsigned int err_len = MIN(data_len, sizeof (err));
+ int err_code = MC_CMD_ERR_EPROTO;
+ int err_arg = 0;
+
+ /* Read error code (and arg num for MCDI v2 commands) */
+ efx_mcdi_read_response(enp, &err, hdr_len, err_len);
+
+ if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
+ err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
+#ifdef WITH_MCDI_V2
+ if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
+ err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
+#endif
+ emrp->emr_err_code = err_code;
+ emrp->emr_err_arg = err_arg;
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
+ (err_len == sizeof (err))) {
+ /*
+ * The MCDI request would normally fail with EPERM, but
+ * firmware has forwarded it to an authorization agent
+ * attached to a privileged PF.
+ *
+ * Save the authorization request handle. The client
+ * must wait for a PROXY_RESPONSE event, or timeout.
+ */
+ emrp->emr_proxy_handle = err_arg;
+ }
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ &err[0], err_len);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
+ int, err_code, int, err_arg);
+ }
+
+ rc = efx_mcdi_request_errcode(err_code);
+ goto fail3;
+ }
+
+ emrp->emr_rc = 0;
+ emrp->emr_out_length_used = data_len;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ emrp->emr_proxy_handle = 0;
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+ return;
+
+fail3:
+fail2:
+fail1:
+ emrp->emr_rc = rc;
+ emrp->emr_out_length_used = 0;
+}
+
+static void
+efx_mcdi_finish_response(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp)
+{
+#if EFSYS_OPT_MCDI_LOGGING
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+ efx_dword_t hdr[2];
+ unsigned int hdr_len;
+ size_t bytes;
+
+ if (emrp->emr_out_buf == NULL)
+ return;
+
+ /* Read the command header to detect MCDI response format */
+ hdr_len = sizeof (hdr[0]);
+ efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
+ if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
+ /*
+ * Read the actual payload length. The length given in the event
+ * is only correct for responses with the V1 format.
+ */
+ efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
+ hdr_len += sizeof (hdr[1]);
+
+ emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
+
+ /* Copy payload out into caller supplied buffer */
+ bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
+ efx_mcdi_read_response(enp, emrp->emr_out_buf, hdr_len, bytes);
+
+#if EFSYS_OPT_MCDI_LOGGING
+ if (emtp->emt_logger != NULL) {
+ emtp->emt_logger(emtp->emt_context,
+ EFX_LOG_MCDI_RESPONSE,
+ &hdr[0], hdr_len,
+ emrp->emr_out_buf, bytes);
+ }
+#endif /* EFSYS_OPT_MCDI_LOGGING */
+}
+
+
+ __checkReturn boolean_t
+efx_mcdi_request_poll(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /* Serialise against post-watchdog efx_mcdi_ev* */
+ EFSYS_LOCK(enp->en_eslp, state);
+
+ EFSYS_ASSERT(emip->emi_pending_req != NULL);
+ EFSYS_ASSERT(!emip->emi_ev_cpl);
+ emrp = emip->emi_pending_req;
+
+ /* Check for reboot atomically w.r.t efx_mcdi_request_start */
+ if (emip->emi_poll_cnt++ == 0) {
+ if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ /* Reboot/Assertion */
+ if (rc == EIO || rc == EINTR)
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ goto fail1;
+ }
+ }
+
+ /* Check if a response is available */
+ if (efx_mcdi_poll_response(enp) == B_FALSE) {
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return (B_FALSE);
+ }
+
+ /* Read the response header */
+ efx_mcdi_read_response_header(enp, emrp);
+
+ /* Request complete */
+ emip->emi_pending_req = NULL;
+
+ /* Ensure stale MCDI requests fail after an MC reboot. */
+ emip->emi_new_epoch = B_FALSE;
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if ((rc = emrp->emr_rc) != 0)
+ goto fail2;
+
+ efx_mcdi_finish_response(enp, emrp);
+ return (B_TRUE);
+
+fail2:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE(fail2);
+fail1:
+ if (!emrp->emr_quiet)
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (B_TRUE);
+}
+
+ __checkReturn boolean_t
+efx_mcdi_request_abort(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t *emrp;
+ boolean_t aborted;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * efx_mcdi_ev_* may have already completed this event, and be
+ * spinning/blocked on the upper layer lock. So it *is* legitimate
+ * to for emi_pending_req to be NULL. If there is a pending event
+ * completed request, then provide a "credit" to allow
+ * efx_mcdi_ev_cpl() to accept a single spurious completion.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ aborted = (emrp != NULL);
+ if (aborted) {
+ emip->emi_pending_req = NULL;
+
+ /* Error the request */
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = ETIMEDOUT;
+
+ /* Provide a credit for seqno/emr_pending_req mismatches */
+ if (emip->emi_ev_cpl)
+ ++emip->emi_aborted;
+
+ /*
+ * The upper layer has called us, so we don't
+ * need to complete the request.
+ */
+ }
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ return (aborted);
+}
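+
+/*
+ * Usage sketch for a transport that completes requests by polling rather
+ * than by events (illustrative only; caller_timed_out() and
+ * caller_delay_us() are hypothetical helpers supplied by the client):
+ *
+ *	efx_mcdi_request_start(enp, emrp, B_FALSE);
+ *	while (!efx_mcdi_request_poll(enp)) {
+ *		if (caller_timed_out()) {
+ *			(void) efx_mcdi_request_abort(enp);
+ *			break;
+ *		}
+ *		caller_delay_us(10);
+ *	}
+ *	rc = emrp->emr_rc;
+ *
+ * efx_mcdi_request_abort() marks an aborted request with ETIMEDOUT, so
+ * emr_rc is valid on both exit paths.
+ */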
+
+ void
+efx_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+
+ emcop->emco_get_timeout(enp, emrp, timeoutp);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err)
+{
+
+ switch (err) {
+ /* MCDI v1 */
+ case MC_CMD_ERR_EPERM:
+ return (EACCES);
+ case MC_CMD_ERR_ENOENT:
+ return (ENOENT);
+ case MC_CMD_ERR_EINTR:
+ return (EINTR);
+ case MC_CMD_ERR_EACCES:
+ return (EACCES);
+ case MC_CMD_ERR_EBUSY:
+ return (EBUSY);
+ case MC_CMD_ERR_EINVAL:
+ return (EINVAL);
+ case MC_CMD_ERR_EDEADLK:
+ return (EDEADLK);
+ case MC_CMD_ERR_ENOSYS:
+ return (ENOTSUP);
+ case MC_CMD_ERR_ETIME:
+ return (ETIMEDOUT);
+ case MC_CMD_ERR_ENOTSUP:
+ return (ENOTSUP);
+ case MC_CMD_ERR_EALREADY:
+ return (EALREADY);
+
+ /* MCDI v2 */
+ case MC_CMD_ERR_EEXIST:
+ return (EEXIST);
+#ifdef MC_CMD_ERR_EAGAIN
+ case MC_CMD_ERR_EAGAIN:
+ return (EAGAIN);
+#endif
+#ifdef MC_CMD_ERR_ENOSPC
+ case MC_CMD_ERR_ENOSPC:
+ return (ENOSPC);
+#endif
+ case MC_CMD_ERR_ERANGE:
+ return (ERANGE);
+
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return (ENOMEM);
+ case MC_CMD_ERR_NO_VADAPTOR:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_EVB_PORT:
+ return (ENOENT);
+ case MC_CMD_ERR_NO_VSWITCH:
+ return (ENODEV);
+ case MC_CMD_ERR_VLAN_LIMIT:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_PCI_FUNC:
+ return (ENODEV);
+ case MC_CMD_ERR_BAD_VLAN_MODE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VSWITCH_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_BAD_VPORT_TYPE:
+ return (EINVAL);
+ case MC_CMD_ERR_MAC_EXIST:
+ return (EEXIST);
+
+ case MC_CMD_ERR_PROXY_PENDING:
+ return (EAGAIN);
+
+ default:
+ EFSYS_PROBE1(mc_pcol_error, int, err);
+ return (EIO);
+ }
+}
+
+ void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_exception_t exception;
+
+ /* Reboot or Assertion failure only */
+ EFSYS_ASSERT(rc == EIO || rc == EINTR);
+
+ /*
+ * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
+ * then the EIO is not worthy of an exception.
+ */
+ if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
+ return;
+
+ exception = (rc == EIO)
+ ? EFX_MCDI_EXCEPTION_MC_REBOOT
+ : EFX_MCDI_EXCEPTION_MC_BADASSERT;
+
+ emtp->emt_exception(emtp->emt_context, exception);
+}
+
+ void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_FALSE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ emrp->emr_quiet = B_TRUE;
+ emtp->emt_execute(emtp->emt_context, emrp);
+}
+
+ void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp;
+ efsys_lock_state_t state;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ /*
+ * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
+ * when we're completing an aborted request.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
+ (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
+ EFSYS_ASSERT(emip->emi_aborted > 0);
+ if (emip->emi_aborted > 0)
+ --emip->emi_aborted;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+ return;
+ }
+
+ emrp = emip->emi_pending_req;
+ emip->emi_pending_req = NULL;
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ if (emip->emi_max_version >= 2) {
+ /* MCDIv2 response details do not fit into an event. */
+ efx_mcdi_read_response_header(enp, emrp);
+ } else {
+ if (errcode != 0) {
+ if (!emrp->emr_quiet) {
+ EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
+ int, errcode);
+ }
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = efx_mcdi_request_errcode(errcode);
+ } else {
+ emrp->emr_out_length_used = outlen;
+ emrp->emr_rc = 0;
+ }
+ }
+ if (errcode == 0) {
+ efx_mcdi_finish_response(enp, emrp);
+ }
+
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep)
+{
+ efx_rc_t rc;
+
+ /*
+ * Return the proxy handle from an MCDI request that failed with
+ * MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a matching
+ * PROXY_RESPONSE event.
+ */
+ if ((emrp == NULL) || (handlep == NULL)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if ((emrp->emr_rc != 0) &&
+ (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
+ *handlep = emrp->emr_proxy_handle;
+ rc = 0;
+ } else {
+ *handlep = 0;
+ rc = ENOENT;
+ }
+ return (rc);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
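+
+/*
+ * Illustrative flow (sketch): a request that fails with
+ * MC_CMD_ERR_PROXY_PENDING is completed later by a PROXY_RESPONSE event.
+ *
+ *	uint32_t handle;
+ *
+ *	efx_mcdi_execute(enp, &req);
+ *	if (req.emr_rc != 0 &&
+ *	    efx_mcdi_get_proxy_handle(enp, &req, &handle) == 0) {
+ *		...wait for efx_mcdi_ev_proxy_response() to report this
+ *		   handle, then re-issue the request if the proxied
+ *		   authorization succeeded...
+ *	}
+ */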
+
+ void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status)
+{
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_rc_t rc;
+
+ /*
+ * Handle results of an authorization request for a privileged MCDI
+ * command. If authorization was granted then we must re-issue the
+ * original MCDI request. If authorization failed or timed out,
+ * then the original MCDI request should be completed with the
+ * result code from this event.
+ */
+ rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);
+
+ emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
+}
+#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
+
+ void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
+ efx_mcdi_req_t *emrp = NULL;
+ boolean_t ev_cpl;
+ efsys_lock_state_t state;
+
+ /*
+ * The MCDI request (if there is one) has been terminated, either
+ * by a BADASSERT or REBOOT event.
+ *
+ * If there is an outstanding event-completed MCDI operation, then we
+ * will never receive the completion event (because both MCDI
+ * completions and BADASSERT events are sent to the same evq). So
+ * complete this MCDI op.
+ *
+ * This function might run in parallel with efx_mcdi_request_poll()
+ * for poll-completed MCDI requests, and also with
+ * efx_mcdi_request_start() for post-watchdog completions.
+ */
+ EFSYS_LOCK(enp->en_eslp, state);
+ emrp = emip->emi_pending_req;
+ ev_cpl = emip->emi_ev_cpl;
+ if (emrp != NULL && emip->emi_ev_cpl) {
+ emip->emi_pending_req = NULL;
+
+ emrp->emr_out_length_used = 0;
+ emrp->emr_rc = rc;
+ ++emip->emi_aborted;
+ }
+
+ /*
+ * Since we're running in parallel with a request, consume the
+ * status word before dropping the lock.
+ */
+ if (rc == EIO || rc == EINTR) {
+ EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
+ (void) efx_mcdi_poll_reboot(enp);
+ emip->emi_new_epoch = B_TRUE;
+ }
+
+ EFSYS_UNLOCK(enp->en_eslp, state);
+
+ efx_mcdi_raise_exception(enp, emrp, rc);
+
+ if (emrp != NULL && ev_cpl)
+ emtp->emt_ev_cpl(emtp->emt_context);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_GET_VERSION_IN_LEN,
+ MC_CMD_GET_VERSION_OUT_LEN),
+ MAX(MC_CMD_GET_BOOT_STATUS_IN_LEN,
+ MC_CMD_GET_BOOT_STATUS_OUT_LEN))];
+ efx_word_t *ver_words;
+ uint16_t version[4];
+ uint32_t build;
+ efx_mcdi_boot_t status;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_VERSION;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* bootrom support */
+ if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+ goto version;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
+ version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
+ version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
+ version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
+ version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
+ build = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
+
+version:
+ /* The bootrom doesn't understand BOOT_STATUS */
+ if (MC_FW_VERSION_IS_BOOTLOADER(build)) {
+ status = EFX_MCDI_BOOT_ROM;
+ goto out;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot access BOOT_STATUS */
+ status = EFX_MCDI_BOOT_PRIMARY;
+ version[0] = version[1] = version[2] = version[3] = 0;
+ build = 0;
+ goto out;
+ }
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail4;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
+ GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
+ status = EFX_MCDI_BOOT_PRIMARY;
+ else
+ status = EFX_MCDI_BOOT_SECONDARY;
+
+out:
+ if (versionp != NULL)
+ memcpy(versionp, version, sizeof (version));
+ if (buildp != NULL)
+ *buildp = build;
+ if (statusp != NULL)
+ *statusp = status;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
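+
+/*
+ * Usage sketch (illustrative): read the firmware version and boot status.
+ *
+ *	uint16_t version[4];
+ *	uint32_t build;
+ *	efx_mcdi_boot_t status;
+ *
+ *	if (efx_mcdi_version(enp, version, &build, &status) == 0 &&
+ *	    status == EFX_MCDI_BOOT_ROM) {
+ *		...the MC is still running the BootROM; version[] is zero...
+ *	}
+ */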
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_CAPABILITIES_IN_LEN,
+ MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)];
+ boolean_t v2_capable;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_CAPABILITIES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (flagsp != NULL)
+ *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);
+
+ if (rx_dpcpu_fw_idp != NULL)
+ *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
+
+ if (tx_dpcpu_fw_idp != NULL)
+ *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
+
+ if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ v2_capable = B_FALSE;
+ else
+ v2_capable = B_TRUE;
+
+ if (flags2p != NULL) {
+ *flags2p = (v2_capable) ?
+ MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
+ 0;
+ }
+
+ if (tso2ncp != NULL) {
+ *tso2ncp = (v2_capable) ?
+ MCDI_OUT_WORD(req,
+ GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
+ 0;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_do_reboot(
+ __in efx_nic_t *enp,
+ __in boolean_t after_assertion)
+{
+ uint8_t payload[MAX(MC_CMD_REBOOT_IN_LEN, MC_CMD_REBOOT_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ /*
+ * We could require the caller to have caused en_mod_flags=0 to
+ * call this function. This doesn't help the other port though,
+ * which is about to get the MC ripped out from underneath it.
+ * Since that port has to cope with the subsequent fallout of MCDI
+ * failures, we should as well.
+ */
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_REBOOT;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
+ (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot reboot the MC. */
+ goto out;
+ }
+
+ /* A successful reboot request returns EIO. */
+ if (req.emr_rc != 0 && req.emr_rc != EIO) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+out:
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_reboot(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_FALSE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp)
+{
+ return (efx_mcdi_do_reboot(enp, B_TRUE));
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_ASSERTS_IN_LEN,
+ MC_CMD_GET_ASSERTS_OUT_LEN)];
+ const char *reason;
+ unsigned int flags;
+ unsigned int index;
+ unsigned int ofst;
+ int retry;
+ efx_rc_t rc;
+
+ /*
+ * Before we attempt to chat to the MC, we should verify that the MC
+ * isn't in its assertion handler, either due to a previous reboot,
+ * or because we're reinitializing due to an eec_exception().
+ *
+ * Use GET_ASSERTS to read any assertion state that may be present.
+ * Retry this command twice: once because a boot-time assertion failure
+ * might cause the first MCDI request to fail, and again because
+ * we might race with efx_mcdi_exit_assertion_handler() running on
+ * partner port(s) on the same NIC.
+ */
+ retry = 2;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_ASSERTS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
+ efx_mcdi_execute_quiet(enp, &req);
+
+ } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);
+
+ if (req.emr_rc != 0) {
+ if (req.emr_rc == EACCES) {
+ /* Unprivileged functions cannot clear assertions. */
+ goto out;
+ }
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /* Print out any assertion state recorded */
+ flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+ return (0);
+
+ reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+ ? "system-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+ ? "thread-level assertion"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+ ? "watchdog reset"
+ : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
+ ? "illegal address trap"
+ : "unknown assertion";
+ EFSYS_PROBE3(mcpu_assertion,
+ const char *, reason, unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ unsigned int,
+ MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));
+
+ /* Print out the registers (r1 ... r31) */
+ ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
+ for (index = 1;
+ index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++) {
+ EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
+ EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
+ EFX_DWORD_0));
+ ofst += sizeof (efx_dword_t);
+ }
+ EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);
+
+out:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * Internal routines for specific MCDI requests.
+ */
+
+ __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_DRV_ATTACH_IN_LEN,
+ MC_CMD_DRV_ATTACH_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_DRV_ATTACH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;
+
+ /*
+ * Use DONT_CARE for the datapath firmware type to ensure that the
+ * driver can attach to an unprivileged function. The datapath firmware
+ * type to use is controlled by the 'sfboot' utility.
+ */
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_NEW_STATE, attach ? 1 : 0);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMIN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (mac_addrp != NULL) {
+ uint8_t *addrp;
+
+ if (emip->emi_port == 1) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
+ } else if (emip->emi_port == 2) {
+ addrp = MCDI_OUT2(req, uint8_t,
+ GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ EFX_MAC_ADDR_COPY(mac_addrp, addrp);
+ }
+
+ if (capabilitiesp != NULL) {
+ if (emip->emi_port == 1) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+ } else if (emip->emi_port == 2) {
+ *capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
+ GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+ } else {
+ rc = EINVAL;
+ goto fail4;
+ }
+ }
+
+ if (board_typep != NULL) {
+ *board_typep = MCDI_OUT_DWORD(req,
+ GET_BOARD_CFG_OUT_BOARD_TYPE);
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
+ MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (nevqp != NULL)
+ *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
+ if (nrxqp != NULL)
+ *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
+ if (ntxqp != NULL)
+ *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_CFG_IN_LEN,
+ MC_CMD_GET_PHY_CFG_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
+#if EFSYS_OPT_NAMES
+ (void) strncpy(encp->enc_phy_name,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME),
+ MIN(sizeof (encp->enc_phy_name) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
+#endif /* EFSYS_OPT_NAMES */
+ (void) memset(encp->enc_phy_revision, 0,
+ sizeof (encp->enc_phy_revision));
+ memcpy(encp->enc_phy_revision,
+ MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
+ MIN(sizeof (encp->enc_phy_revision) - 1,
+ MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
+#if EFSYS_OPT_PHY_LED_CONTROL
+ encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
+ (1 << EFX_PHY_LED_OFF) |
+ (1 << EFX_PHY_LED_ON));
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ /* Get the media type of the fixed port, if recognised. */
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
+ EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
+ epp->ep_fixed_port_type =
+ (efx_phy_media_type_t) MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
+ if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
+ epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;
+
+ epp->ep_phy_cap_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+#if EFSYS_OPT_PHY_FLAGS
+ encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);
+
+ /* Populate internal state */
+ encp->enc_mcdi_mdio_channel =
+ (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);
+
+#if EFSYS_OPT_PHY_STATS
+ encp->enc_mcdi_phy_stat_mask =
+ MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+ encp->enc_bist_mask = 0;
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST_CABLE_LONG))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
+ if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
+ GET_PHY_CFG_OUT_BIST))
+ encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
+#endif /* EFSYS_OPT_BIST */
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported updates */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC changes */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported link control */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp)
+{
+ const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
+ efx_rc_t rc;
+
+ if (emcop != NULL) {
+ if ((rc = emcop->emco_feature_supported(enp,
+ EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
+ goto fail1;
+ } else {
+ /* Earlier devices always supported MAC spoofing */
+ *supportedp = B_TRUE;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_BIST
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+/*
+ * Enter bist offline mode. This is a fw mode which puts the NIC into a state
+ * where memory BIST tests can be run and not much else can interfere or happen.
+ * A reboot is required to exit this mode.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
+ EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_START_BIST_IN_LEN,
+ MC_CMD_START_BIST_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_START_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;
+
+ switch (type) {
+ case EFX_BIST_TYPE_PHY_NORMAL:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_SHORT:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_SHORT);
+ break;
+ case EFX_BIST_TYPE_PHY_CABLE_LONG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PHY_BIST_CABLE_LONG);
+ break;
+ case EFX_BIST_TYPE_MC_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_MC_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_SAT_MEM:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_PORT_MEM_BIST);
+ break;
+ case EFX_BIST_TYPE_REG:
+ MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
+ MC_CMD_REG_BIST);
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ }
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+
+/* Enable logging of some events (e.g. link state changes) */
+ __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_LOG_CTRL_IN_LEN,
+ MC_CMD_LOG_CTRL_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_LOG_CTRL;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
+ MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
+ MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#if EFSYS_OPT_MAC_STATS
+
+typedef enum efx_stats_action_e {
+ EFX_STATS_CLEAR,
+ EFX_STATS_UPLOAD,
+ EFX_STATS_ENABLE_NOEVENTS,
+ EFX_STATS_ENABLE_EVENTS,
+ EFX_STATS_DISABLE,
+} efx_stats_action_t;
+
+static __checkReturn efx_rc_t
+efx_mcdi_mac_stats(
+ __in efx_nic_t *enp,
+ __in_opt efsys_mem_t *esmp,
+ __in efx_stats_action_t action,
+ __in uint16_t period_ms)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_MAC_STATS_IN_LEN,
+ MC_CMD_MAC_STATS_OUT_DMA_LEN)];
+ int clear = (action == EFX_STATS_CLEAR);
+ int upload = (action == EFX_STATS_UPLOAD);
+ int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
+ int events = (action == EFX_STATS_ENABLE_EVENTS);
+ int disable = (action == EFX_STATS_DISABLE);
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_MAC_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_MAC_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, upload,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
+ MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
+ MAC_STATS_IN_PERIODIC_NOEVENT, !events,
+ MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);
+
+ if (esmp != NULL) {
+ int bytes = MC_CMD_MAC_NSTATS * sizeof (uint64_t);
+
+ EFX_STATIC_ASSERT(MC_CMD_MAC_NSTATS * sizeof (uint64_t) <=
+ EFX_MAC_STATS_SIZE);
+
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
+ } else {
+ EFSYS_ASSERT(!upload && !enable && !events);
+ }
+
+ /*
+ * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
+ * as this may fail (and leave periodic DMA enabled) if the
+ * vadapter has already been deleted.
+ */
+ MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
+ (disable ? EVB_PORT_ID_NULL : enp->en_vport_id));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ /* EF10: Expect ENOENT if no DMA queues are initialised */
+ if ((req.emr_rc != ENOENT) ||
+ (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_CLEAR, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ */
+ if ((rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_UPLOAD, 0)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events)
+{
+ efx_rc_t rc;
+
+ /*
+ * The MC DMAs aggregate statistics for our convenience, so we can
+ * avoid having to pull the statistics buffer into the cache to
+ * maintain cumulative statistics.
+ * Huntington uses a fixed 1sec period.
+ * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
+ */
+ if (period_ms == 0)
+ rc = efx_mcdi_mac_stats(enp, NULL, EFX_STATS_DISABLE, 0);
+ else if (events)
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_EVENTS,
+ period_ms);
+ else
+ rc = efx_mcdi_mac_stats(enp, esmp, EFX_STATS_ENABLE_NOEVENTS,
+ period_ms);
+
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+/*
+ * This function returns the PF and VF numbers of a function. If it is a PF,
+ * the VF number is 0xffff. The VF number is the index of the VF on that
+ * PF. So if there are 3 VFs on PF 0, the 3 VFs will return (pf=0, vf=0),
+ * (pf=0, vf=1), (pf=0, vf=2) and the PF will return (pf=0, vf=0xffff).
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_get_function_info(
+ __in efx_nic_t *enp,
+ __out uint32_t *pfp,
+ __out_opt uint32_t *vfp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_FUNCTION_INFO_IN_LEN,
+ MC_CMD_GET_FUNCTION_INFO_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
+ if (vfp != NULL)
+ *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_privilege_mask(
+ __in efx_nic_t *enp,
+ __in uint32_t pf,
+ __in uint32_t vf,
+ __out uint32_t *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PRIVILEGE_MASK_IN_LEN,
+ MC_CMD_PRIVILEGE_MASK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
+
+ MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
+ PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
+ PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_mcdi_set_workaround(
+ __in efx_nic_t *enp,
+ __in uint32_t type,
+ __in boolean_t enabled,
+ __out_opt uint32_t *flagsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_WORKAROUND_IN_LEN,
+ MC_CMD_WORKAROUND_EXT_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_WORKAROUND;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_WORKAROUND_EXT_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
+ MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (flagsp != NULL) {
+ if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+ *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS);
+ else
+ *flagsp = 0;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_workarounds(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *implementedp,
+ __out_opt uint32_t *enabledp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_WORKAROUNDS;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (implementedp != NULL) {
+ *implementedp =
+ MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+ }
+
+ if (enabledp != NULL) {
+ *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED);
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * Size of media information page in accordance with SFF-8472 and SFF-8436.
+ * The same page size is used by the MCDI interface.
+ */
+#define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80
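+/* (0x80 = 128 bytes, i.e. half of the 256-byte SFF device memory map) */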
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_phy_media_info(
+ __in efx_nic_t *enp,
+ __in uint32_t mcdi_page,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE))];
+ efx_rc_t rc;
+
+ EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length =
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used !=
+ MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
+ EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ len);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * 2-wire device address of the base information in accordance with SFF-8472
+ * Diagnostic Monitoring Interface for Optical Transceivers section
+ * 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE 0xA0
+
+/*
+ * 2-wire device address of the digital diagnostics monitoring interface
+ * in accordance with SFF-8472 Diagnostic Monitoring Interface for Optical
+ * Transceivers section 4 Memory Organization.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM 0xA2
+
+/*
+ * Hard wired 2-wire device address for QSFP+ in accordance with SFF-8436
+ * QSFP+ 10 Gbs 4X PLUGGABLE TRANSCEIVER section 7.4 Device Addressing and
+ * Operation.
+ */
+#define EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP 0xA0
+
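+/*
+ * Summary of the mapping implemented below: for SFP+, device address 0xA0
+ * maps to MCDI pages 0 (lower) and 1 (upper), and 0xA2 maps to pages 2 and
+ * 3; for QSFP+, device address 0xA0 maps to MCDI pages -1 (lower) and 0
+ * (upper). For example, an illustrative call such as
+ * efx_mcdi_phy_module_get_info(enp, 0xA0, 0, 128, buf) is served entirely
+ * from the lower page, while reads starting at offset 0x80 fall through to
+ * the corresponding upper page.
+ */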
+ __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_rc_t rc;
+ uint32_t mcdi_lower_page;
+ uint32_t mcdi_upper_page;
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ /*
+ * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
+ * The offset-plus-length interface only allows access to page 0,
+ * i.e. non-zero upper pages are not accessible.
+ * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
+ * QSFP+ Memory Map for details on how information is structured
+ * and accessible.
+ */
+ switch (epp->ep_fixed_port_type) {
+ case EFX_PHY_MEDIA_SFP_PLUS:
+ /*
+ * In accordance with SFF-8472 Diagnostic Monitoring
+ * Interface for Optical Transceivers section 4 Memory
+ * Organization two 2-wire addresses are defined.
+ */
+ switch (dev_addr) {
+ /* Base information */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
+ /*
+ * MCDI page 0 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA0.
+ */
+ mcdi_lower_page = 0;
+ /*
+ * MCDI page 1 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA0.
+ */
+ mcdi_upper_page = 1;
+ break;
+ /* Diagnostics */
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
+ /*
+ * MCDI page 2 should be used to access lower
+ * page 0 (0x00 - 0x7f) at the device address 0xA2.
+ */
+ mcdi_lower_page = 2;
+ /*
+ * MCDI page 3 should be used to access upper
+ * page 0 (0x80 - 0xff) at the device address 0xA2.
+ */
+ mcdi_upper_page = 3;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ case EFX_PHY_MEDIA_QSFP_PLUS:
+ switch (dev_addr) {
+ case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
+ /*
+ * MCDI page -1 should be used to access lower page 0
+ * (0x00 - 0x7f).
+ */
+ mcdi_lower_page = (uint32_t)-1;
+ /*
+ * MCDI page 0 should be used to access upper page 0
+ * (0x80 - 0xff).
+ */
+ mcdi_upper_page = 0;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
+ uint8_t read_len =
+ MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_lower_page, offset, read_len, data);
+ if (rc != 0)
+ goto fail2;
+
+ data += read_len;
+ len -= read_len;
+
+ offset = 0;
+ } else {
+ offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
+ }
+
+ if (len > 0) {
+ EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+ EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
+
+ rc = efx_mcdi_get_phy_media_info(enp,
+ mcdi_upper_page, offset, len, data);
+ if (rc != 0)
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MCDI */
diff --git a/drivers/net/sfc/base/efx_mcdi.h b/drivers/net/sfc/base/efx_mcdi.h
new file mode 100644
index 00000000..21727713
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mcdi.h
@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_MCDI_H
+#define _SYS_EFX_MCDI_H
+
+#include "efx.h"
+#include "efx_regs_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 10ms for the status word to be set.
+ */
+#define EFX_MCDI_STATUS_SLEEP_US 10000
+
+struct efx_mcdi_req_s {
+ boolean_t emr_quiet;
+ /* Inputs: Command #, input buffer and length */
+ unsigned int emr_cmd;
+ uint8_t *emr_in_buf;
+ size_t emr_in_length;
+ /* Outputs: retcode, buffer, length, and length used */
+ efx_rc_t emr_rc;
+ uint8_t *emr_out_buf;
+ size_t emr_out_length;
+ size_t emr_out_length_used;
+ /* Internals: low level transport details */
+ unsigned int emr_err_code;
+ unsigned int emr_err_arg;
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+ uint32_t emr_proxy_handle;
+#endif
+};
+
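+/*
+ * Typical efx_mcdi_req_s usage, as seen throughout efx_mcdi.c: zero a
+ * payload buffer sized MAX(..._IN_LEN, ..._OUT_LEN), set emr_cmd and the
+ * in/out buffer pointers and lengths, populate input fields with the
+ * MCDI_IN_* macros, call efx_mcdi_execute(), then check emr_rc and
+ * emr_out_length_used before decoding results with the MCDI_OUT_* macros.
+ */
+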
+typedef struct efx_mcdi_iface_s {
+ unsigned int emi_port;
+ unsigned int emi_max_version;
+ unsigned int emi_seq;
+ efx_mcdi_req_t *emi_pending_req;
+ boolean_t emi_ev_cpl;
+ boolean_t emi_new_epoch;
+ int emi_aborted;
+ uint32_t emi_poll_cnt;
+ uint32_t emi_mc_reboot_status;
+} efx_mcdi_iface_t;
+
+extern void
+efx_mcdi_execute(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_execute_quiet(
+ __in efx_nic_t *enp,
+ __inout efx_mcdi_req_t *emrp);
+
+extern void
+efx_mcdi_ev_cpl(
+ __in efx_nic_t *enp,
+ __in unsigned int seq,
+ __in unsigned int outlen,
+ __in int errcode);
+
+#if EFSYS_OPT_MCDI_PROXY_AUTH
+extern __checkReturn efx_rc_t
+efx_mcdi_get_proxy_handle(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *handlep);
+
+extern void
+efx_mcdi_ev_proxy_response(
+ __in efx_nic_t *enp,
+ __in unsigned int handle,
+ __in unsigned int status);
+#endif
+
+extern void
+efx_mcdi_ev_death(
+ __in efx_nic_t *enp,
+ __in int rc);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_request_errcode(
+ __in unsigned int err);
+
+extern void
+efx_mcdi_raise_exception(
+ __in efx_nic_t *enp,
+ __in_opt efx_mcdi_req_t *emrp,
+ __in int rc);
+
+typedef enum efx_mcdi_boot_e {
+ EFX_MCDI_BOOT_PRIMARY,
+ EFX_MCDI_BOOT_SECONDARY,
+ EFX_MCDI_BOOT_ROM,
+} efx_mcdi_boot_t;
+
+extern __checkReturn efx_rc_t
+efx_mcdi_version(
+ __in efx_nic_t *enp,
+ __out_ecount_opt(4) uint16_t versionp[4],
+ __out_opt uint32_t *buildp,
+ __out_opt efx_mcdi_boot_t *statusp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_capabilities(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *flagsp,
+ __out_opt uint16_t *rx_dpcpu_fw_idp,
+ __out_opt uint16_t *tx_dpcpu_fw_idp,
+ __out_opt uint32_t *flags2p,
+ __out_opt uint32_t *tso2ncp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_read_assertion(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_exit_assertion_handler(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_drv_attach(
+ __in efx_nic_t *enp,
+ __in boolean_t attach);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_board_cfg(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *board_typep,
+ __out_opt efx_dword_t *capabilitiesp,
+ __out_ecount_opt(6) uint8_t mac_addrp[6]);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_phy_cfg(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_firmware_update_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_macaddr_change_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_link_control_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_spoofing_supported(
+ __in efx_nic_t *enp,
+ __out boolean_t *supportedp);
+
+
+#if EFSYS_OPT_BIST
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_enable_offline(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+extern __checkReturn efx_rc_t
+efx_mcdi_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_get_resource_limits(
+ __in efx_nic_t *enp,
+ __out_opt uint32_t *nevqp,
+ __out_opt uint32_t *nrxqp,
+ __out_opt uint32_t *ntxqp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_log_ctrl(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_clear(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_upload(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp);
+
+extern __checkReturn efx_rc_t
+efx_mcdi_mac_stats_periodic(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint16_t period_ms,
+ __in boolean_t events);
+
+
+#if EFSYS_OPT_LOOPBACK
+extern __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+extern __checkReturn efx_rc_t
+efx_mcdi_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data);
+
+#define MCDI_IN(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_in_buf + (_ofst)))
+
+#define MCDI_IN2(_emr, _type, _ofst) \
+ MCDI_IN(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_IN_SET_BYTE(_emr, _ofst, _value) \
+ EFX_POPULATE_BYTE_1(*MCDI_IN2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0, _value)
+
+#define MCDI_IN_SET_WORD(_emr, _ofst, _value) \
+ EFX_POPULATE_WORD_1(*MCDI_IN2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0, _value)
+
+#define MCDI_IN_SET_DWORD(_emr, _ofst, _value) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0, _value)
+
+#define MCDI_IN_SET_DWORD_FIELD(_emr, _ofst, _field, _value) \
+ EFX_SET_DWORD_FIELD(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field, _value)
+
+#define MCDI_IN_POPULATE_DWORD_1(_emr, _ofst, _field1, _value1) \
+ EFX_POPULATE_DWORD_1(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1)
+
+#define MCDI_IN_POPULATE_DWORD_2(_emr, _ofst, _field1, _value1, \
+ _field2, _value2) \
+ EFX_POPULATE_DWORD_2(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2)
+
+#define MCDI_IN_POPULATE_DWORD_3(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_3(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3)
+
+#define MCDI_IN_POPULATE_DWORD_4(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4) \
+ EFX_POPULATE_DWORD_4(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4)
+
+#define MCDI_IN_POPULATE_DWORD_5(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5) \
+ EFX_POPULATE_DWORD_5(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5)
+
+#define MCDI_IN_POPULATE_DWORD_6(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_6(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6)
+
+#define MCDI_IN_POPULATE_DWORD_7(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7) \
+ EFX_POPULATE_DWORD_7(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7)
+
+#define MCDI_IN_POPULATE_DWORD_8(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8) \
+ EFX_POPULATE_DWORD_8(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8)
+
+#define MCDI_IN_POPULATE_DWORD_9(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_9(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9)
+
+#define MCDI_IN_POPULATE_DWORD_10(_emr, _ofst, _field1, _value1, \
+ _field2, _value2, _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, _field7, _value7, \
+ _field8, _value8, _field9, _value9, _field10, _value10) \
+ EFX_POPULATE_DWORD_10(*MCDI_IN2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field1, _value1, \
+ MC_CMD_ ## _field2, _value2, \
+ MC_CMD_ ## _field3, _value3, \
+ MC_CMD_ ## _field4, _value4, \
+ MC_CMD_ ## _field5, _value5, \
+ MC_CMD_ ## _field6, _value6, \
+ MC_CMD_ ## _field7, _value7, \
+ MC_CMD_ ## _field8, _value8, \
+ MC_CMD_ ## _field9, _value9, \
+ MC_CMD_ ## _field10, _value10)
+
+#define MCDI_OUT(_emr, _type, _ofst) \
+ ((_type *)((_emr).emr_out_buf + (_ofst)))
+
+#define MCDI_OUT2(_emr, _type, _ofst) \
+ MCDI_OUT(_emr, _type, MC_CMD_ ## _ofst ## _OFST)
+
+#define MCDI_OUT_BYTE(_emr, _ofst) \
+ EFX_BYTE_FIELD(*MCDI_OUT2(_emr, efx_byte_t, _ofst), \
+ EFX_BYTE_0)
+
+#define MCDI_OUT_WORD(_emr, _ofst) \
+ EFX_WORD_FIELD(*MCDI_OUT2(_emr, efx_word_t, _ofst), \
+ EFX_WORD_0)
+
+#define MCDI_OUT_DWORD(_emr, _ofst) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ EFX_DWORD_0)
+
+#define MCDI_OUT_DWORD_FIELD(_emr, _ofst, _field) \
+ EFX_DWORD_FIELD(*MCDI_OUT2(_emr, efx_dword_t, _ofst), \
+ MC_CMD_ ## _field)
+
+#define MCDI_EV_FIELD(_eqp, _field) \
+ EFX_QWORD_FIELD(*_eqp, MCDI_EVENT_ ## _field)
+
+#define MCDI_CMD_DWORD_FIELD(_edp, _field) \
+ EFX_DWORD_FIELD(*_edp, MC_CMD_ ## _field)
+
+#define EFX_MCDI_HAVE_PRIVILEGE(mask, priv) \
+ (((mask) & (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv)) == \
+ (MC_CMD_PRIVILEGE_MASK_IN_GRP_ ## priv))
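+
+/*
+ * For example (assuming the GRP_LINK privilege group from the MCDI register
+ * definitions), EFX_MCDI_HAVE_PRIVILEGE(mask, LINK) tests the
+ * MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK bits in a mask obtained from
+ * efx_mcdi_privilege_mask().
+ */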
+
+typedef enum efx_mcdi_feature_id_e {
+ EFX_MCDI_FEATURE_FW_UPDATE = 0,
+ EFX_MCDI_FEATURE_LINK_CONTROL,
+ EFX_MCDI_FEATURE_MACADDR_CHANGE,
+ EFX_MCDI_FEATURE_MAC_SPOOFING,
+ EFX_MCDI_FEATURE_NIDS
+} efx_mcdi_feature_id_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_MCDI_H */
diff --git a/drivers/net/sfc/base/efx_mon.c b/drivers/net/sfc/base/efx_mon.c
new file mode 100644
index 00000000..c2f1e97e
--- /dev/null
+++ b/drivers/net/sfc/base/efx_mon.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_mon_name[] = {
+ "",
+ "sfx90x0",
+ "sfx91x0",
+ "sfx92x0"
+};
+
+ const char *
+efx_mon_name(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ EFSYS_ASSERT3U(encp->enc_mon_type, <, EFX_MON_NTYPES);
+ return (__efx_mon_name[encp->enc_mon_type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#if EFSYS_OPT_MON_MCDI
+static const efx_mon_ops_t __efx_mon_mcdi_ops = {
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_stats_update /* emo_stats_update */
+#endif /* EFSYS_OPT_MON_STATS */
+};
+#endif
+
+
+ __checkReturn efx_rc_t
+efx_mon_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_MON) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_MON;
+
+ emp->em_type = encp->enc_mon_type;
+
+ EFSYS_ASSERT(encp->enc_mon_type != EFX_MON_INVALID);
+ switch (emp->em_type) {
+#if EFSYS_OPT_MON_MCDI
+ case EFX_MON_SFC90X0:
+ case EFX_MON_SFC91X0:
+ case EFX_MON_SFC92X0:
+ emop = &__efx_mon_mcdi_ops;
+ break;
+#endif
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ emp->em_emop = emop;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_MON_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED MonitorStatNamesBlock 5daa2a5725ba734b */
+static const char * const __mon_stat_name[] = {
+ "value_2_5v",
+ "value_vccp1",
+ "value_vcc",
+ "value_5v",
+ "value_12v",
+ "value_vccp2",
+ "value_ext_temp",
+ "value_int_temp",
+ "value_ain1",
+ "value_ain2",
+ "controller_cooling",
+ "ext_cooling",
+ "1v",
+ "1_2v",
+ "1_8v",
+ "3_3v",
+ "1_2va",
+ "vref",
+ "vaoe",
+ "aoe_temperature",
+ "psu_aoe_temperature",
+ "psu_temperature",
+ "fan0",
+ "fan1",
+ "fan2",
+ "fan3",
+ "fan4",
+ "vaoe_in",
+ "iaoe",
+ "iaoe_in",
+ "nic_power",
+ "0_9v",
+ "i0_9v",
+ "i1_2v",
+ "0_9v_adc",
+ "controller_temperature2",
+ "vreg_temperature",
+ "vreg_0_9v_temperature",
+ "vreg_1_2v_temperature",
+ "int_vptat",
+ "controller_internal_adc_temperature",
+ "ext_vptat",
+ "controller_external_adc_temperature",
+ "ambient_temperature",
+ "airflow",
+ "vdd08d_vss08d_csr",
+ "vdd08d_vss08d_csr_extadc",
+ "hotpoint_temperature",
+ "phy_power_switch_port0",
+ "phy_power_switch_port1",
+ "mum_vcc",
+ "0v9_a",
+ "i0v9_a",
+ "0v9_a_temp",
+ "0v9_b",
+ "i0v9_b",
+ "0v9_b_temp",
+ "ccom_avreg_1v2_supply",
+ "ccom_avreg_1v2_supply_ext_adc",
+ "ccom_avreg_1v8_supply",
+ "ccom_avreg_1v8_supply_ext_adc",
+ "controller_master_vptat",
+ "controller_master_internal_temp",
+ "controller_master_vptat_ext_adc",
+ "controller_master_internal_temp_ext_adc",
+ "controller_slave_vptat",
+ "controller_slave_internal_temp",
+ "controller_slave_vptat_ext_adc",
+ "controller_slave_internal_temp_ext_adc",
+ "sodimm_vout",
+ "sodimm_0_temp",
+ "sodimm_1_temp",
+ "phy0_vcc",
+ "phy1_vcc",
+ "controller_tdiode_temp",
+ "board_front_temp",
+ "board_back_temp",
+};
+
+/* END MKCONFIG GENERATED MonitorStatNamesBlock */
+
+extern const char *
+efx_mon_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_mon_stat_t id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(id, <, EFX_MON_NSTATS);
+ return (__mon_stat_name[id]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+ const efx_mon_ops_t *emop = emp->em_emop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ return (emop->emo_stats_update(enp, esmp, values));
+}
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+ void
+efx_mon_fini(
+ __in efx_nic_t *enp)
+{
+ efx_mon_t *emp = &(enp->en_mon);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MON);
+
+ emp->em_emop = NULL;
+
+ emp->em_type = EFX_MON_INVALID;
+
+ enp->en_mod_flags &= ~EFX_MOD_MON;
+}
diff --git a/drivers/net/sfc/base/efx_nic.c b/drivers/net/sfc/base/efx_nic.c
new file mode 100644
index 00000000..76caa744
--- /dev/null
+++ b/drivers/net/sfc/base/efx_nic.c
@@ -0,0 +1,1110 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_family(
+ __in uint16_t venid,
+ __in uint16_t devid,
+ __out efx_family_t *efp)
+{
+ if (venid == EFX_PCI_VENID_SFC) {
+ switch (devid) {
+#if EFSYS_OPT_SIENA
+ case EFX_PCI_DEVID_SIENA_F1_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Siena.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+
+ case EFX_PCI_DEVID_BETHPAGE:
+ case EFX_PCI_DEVID_SIENA:
+ *efp = EFX_FAMILY_SIENA;
+ return (0);
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Huntington.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE:
+ case EFX_PCI_DEVID_GREENPORT:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+
+ case EFX_PCI_DEVID_FARMINGDALE_VF:
+ case EFX_PCI_DEVID_GREENPORT_VF:
+ *efp = EFX_FAMILY_HUNTINGTON;
+ return (0);
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_PCI_DEVID_MEDFORD_PF_UNINIT:
+ /*
+ * Hardware default for PF0 of uninitialised Medford.
+ * manftest must be able to cope with this device id.
+ */
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+
+ case EFX_PCI_DEVID_MEDFORD_VF:
+ *efp = EFX_FAMILY_MEDFORD;
+ return (0);
+#endif /* EFSYS_OPT_MEDFORD */
+
+ case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
+ default:
+ break;
+ }
+ }
+
+ *efp = EFX_FAMILY_INVALID;
+ return (ENOTSUP);
+}
+
+
+#define EFX_BIU_MAGIC0 0x01234567
+#define EFX_BIU_MAGIC1 0xfedcba98
+
+ __checkReturn efx_rc_t
+efx_nic_biu_test(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ /*
+ * Write magic values to scratch registers 0 and 1, then
+ * verify that the values were written correctly. Interleave
+ * the accesses to ensure that the BIU is not just reading
+ * back the cached value that was last written.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail2;
+ }
+
+ /*
+ * Perform the same test, with the values swapped. This
+ * ensures that subsequent tests don't start with the correct
+ * values already written into the scratch registers.
+ */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC1);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_DRIVER_DW0, EFX_BIU_MAGIC0);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 0, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC1) {
+ rc = EIO;
+ goto fail3;
+ }
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_DRIVER_REG, 1, &oword, B_TRUE);
+ if (EFX_OWORD_FIELD(oword, FRF_AZ_DRIVER_DW0) != EFX_BIU_MAGIC0) {
+ rc = EIO;
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nic_ops_t __efx_nic_siena_ops = {
+ siena_nic_probe, /* eno_probe */
+ NULL, /* eno_board_cfg */
+ NULL, /* eno_set_drv_limits */
+ siena_nic_reset, /* eno_reset */
+ siena_nic_init, /* eno_init */
+ NULL, /* eno_get_vi_pool */
+ NULL, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ siena_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nic_fini, /* eno_fini */
+ siena_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+
+static const efx_nic_ops_t __efx_nic_hunt_ops = {
+ ef10_nic_probe, /* eno_probe */
+ hunt_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+
+static const efx_nic_ops_t __efx_nic_medford_ops = {
+ ef10_nic_probe, /* eno_probe */
+ medford_board_cfg, /* eno_board_cfg */
+ ef10_nic_set_drv_limits, /* eno_set_drv_limits */
+ ef10_nic_reset, /* eno_reset */
+ ef10_nic_init, /* eno_init */
+ ef10_nic_get_vi_pool, /* eno_get_vi_pool */
+ ef10_nic_get_bar_region, /* eno_get_bar_region */
+#if EFSYS_OPT_DIAG
+ ef10_nic_register_test, /* eno_register_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nic_fini, /* eno_fini */
+ ef10_nic_unprobe, /* eno_unprobe */
+};
+
+#endif /* EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_nic_create(
+ __in efx_family_t family,
+ __in efsys_identifier_t *esip,
+ __in efsys_bar_t *esbp,
+ __in efsys_lock_t *eslp,
+ __deref_out efx_nic_t **enpp)
+{
+ efx_nic_t *enp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
+ EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
+
+ /* Allocate a NIC object */
+ EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
+
+ if (enp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ enp->en_magic = EFX_NIC_MAGIC;
+
+ switch (family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ enp->en_enop = &__efx_nic_siena_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LFSR_HASH_INSERT |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_LOOKAHEAD_SPLIT |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_TX_SRC_FILTERS;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ enp->en_enop = &__efx_nic_hunt_ops;
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ enp->en_enop = &__efx_nic_medford_ops;
+ /*
+ * FW_ASSISTED_TSO omitted as Medford only supports firmware
+ * assisted TSO version 2, not the v1 scheme used on Huntington.
+ */
+ enp->en_features =
+ EFX_FEATURE_IPV6 |
+ EFX_FEATURE_LINK_EVENTS |
+ EFX_FEATURE_PERIODIC_MAC_STATS |
+ EFX_FEATURE_MCDI |
+ EFX_FEATURE_MAC_HEADER_FILTERS |
+ EFX_FEATURE_MCDI_DMA |
+ EFX_FEATURE_PIO_BUFFERS |
+ EFX_FEATURE_FW_ASSISTED_TSO_V2 |
+ EFX_FEATURE_PACKED_STREAM;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+
+ enp->en_family = family;
+ enp->en_esip = esip;
+ enp->en_esbp = esbp;
+ enp->en_eslp = eslp;
+
+ *enpp = enp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_probe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
+
+ enop = enp->en_enop;
+ if ((rc = enop->eno_probe(enp)) != 0)
+ goto fail1;
+
+ if ((rc = efx_phy_probe(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_PROBE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ enop->eno_unprobe(enp);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_set_drv_limits(
+ __inout efx_nic_t *enp,
+ __in efx_drv_limits_t *edlp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enop->eno_set_drv_limits != NULL) {
+ if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_bar_region(
+ __in efx_nic_t *enp,
+ __in efx_nic_region_t region,
+ __out uint32_t *offsetp,
+ __out size_t *sizep)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_bar_region == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if ((rc = (enop->eno_get_bar_region)(enp,
+ region, offsetp, sizep)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_get_vi_pool(
+ __in efx_nic_t *enp,
+ __out uint32_t *evq_countp,
+ __out uint32_t *rxq_countp,
+ __out uint32_t *txq_countp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enop->eno_get_vi_pool != NULL) {
+ uint32_t vi_count = 0;
+
+ if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0)
+ goto fail1;
+
+ *evq_countp = vi_count;
+ *rxq_countp = vi_count;
+ *txq_countp = vi_count;
+ } else {
+ /* Use NIC limits as default value */
+ *evq_countp = encp->enc_evq_limit;
+ *rxq_countp = encp->enc_rxq_limit;
+ *txq_countp = encp->enc_txq_limit;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ if (enp->en_mod_flags & EFX_MOD_NIC) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_init(enp)) != 0)
+ goto fail2;
+
+ enp->en_mod_flags |= EFX_MOD_NIC;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_nic_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ enop->eno_fini(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_NIC;
+}
+
+ void
+efx_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+#if EFSYS_OPT_MCDI
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+#endif /* EFSYS_OPT_MCDI */
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+
+ efx_phy_unprobe(enp);
+
+ enop->eno_unprobe(enp);
+
+ enp->en_mod_flags &= ~EFX_MOD_PROBE;
+}
+
+ void
+efx_nic_destroy(
+ __in efx_nic_t *enp)
+{
+ efsys_identifier_t *esip = enp->en_esip;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
+
+ enp->en_family = EFX_FAMILY_INVALID;
+ enp->en_esip = NULL;
+ enp->en_esbp = NULL;
+ enp->en_eslp = NULL;
+
+ enp->en_enop = NULL;
+
+ enp->en_magic = 0;
+
+ /* Free the NIC object */
+ EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_reset(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ unsigned int mod_flags;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
+ /*
+ * All modules except the MCDI, PROBE, NVRAM, VPD, MON
+ * (which we do not reset here) must have been shut down or never
+ * initialized.
+ *
+ * A rule of thumb here is: if the controller or MC reboots, is *any*
+ * state lost? If it is lost and needs reapplying, then the module
+ * *must* not be initialised during the reset.
+ */
+ mod_flags = enp->en_mod_flags;
+ mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
+ EFX_MOD_VPD | EFX_MOD_MON);
+ EFSYS_ASSERT3U(mod_flags, ==, 0);
+ if (mod_flags != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = enop->eno_reset(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ const efx_nic_cfg_t *
+efx_nic_cfg_get(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ return (&(enp->en_nic_cfg));
+}
+
+ __checkReturn efx_rc_t
+efx_nic_get_fw_version(
+ __in efx_nic_t *enp,
+ __out efx_nic_fw_info_t *enfip)
+{
+ uint16_t mc_fw_version[4];
+ efx_rc_t rc;
+
+ if (enfip == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
+ EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
+
+ rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
+ if (rc != 0)
+ goto fail2;
+
+ rc = efx_mcdi_get_capabilities(enp, NULL,
+ &enfip->enfi_rx_dpcpu_fw_id,
+ &enfip->enfi_tx_dpcpu_fw_id,
+ NULL, NULL);
+ if (rc == 0) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_TRUE;
+ } else if (rc == ENOTSUP) {
+ enfip->enfi_dpcpu_fw_ids_valid = B_FALSE;
+ enfip->enfi_rx_dpcpu_fw_id = 0;
+ enfip->enfi_tx_dpcpu_fw_id = 0;
+ } else {
+ goto fail3;
+ }
+
+ memcpy(enfip->enfi_mc_fw_version, mc_fw_version, sizeof(mc_fw_version));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nic_ops_t *enop = enp->en_enop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
+
+ if ((rc = enop->eno_register_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_registers(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in size_t count)
+{
+ unsigned int bit;
+ efx_oword_t original;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ while (count > 0) {
+ /* This function is only suitable for registers */
+ EFSYS_ASSERT(rsp->rows == 1);
+
+ /* bit sweep on and off */
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+ for (bit = 0; bit < 128; bit++) {
+ /* Is this bit in the mask? */
+ if (~(rsp->mask.eo_u32[bit >> 5]) & (1U << (bit & 0x1f)))
+ continue;
+
+ /* Test this bit can be set in isolation */
+ reg = original;
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFX_SET_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ /* Test this bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, rsp->mask);
+ EFX_CLEAR_OWORD_BIT(reg, bit);
+
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &reg,
+ B_TRUE);
+ EFSYS_BAR_READO(enp->en_esbp, rsp->address, &buf,
+ B_TRUE);
+
+ EFX_AND_OWORD(buf, rsp->mask);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail2;
+ }
+ }
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original,
+ B_TRUE);
+
+ --count;
+ ++rsp;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore the old value */
+ EFSYS_BAR_WRITEO(enp->en_esbp, rsp->address, &original, B_TRUE);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nic_test_tables(
+ __in efx_nic_t *enp,
+ __in efx_register_set_t *rsp,
+ __in efx_pattern_type_t pattern,
+ __in size_t count)
+{
+ efx_sram_pattern_fn_t func;
+ unsigned int index;
+ unsigned int address;
+ efx_oword_t reg;
+ efx_oword_t buf;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(pattern < EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[pattern];
+
+ while (count > 0) {
+ /* Write */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_WRITEO(enp->en_esbp, address, &reg, B_TRUE);
+
+ address += rsp->step;
+ }
+
+ /* Read */
+ address = rsp->address;
+ for (index = 0; index < rsp->rows; ++index) {
+ func(2 * index + 0, B_FALSE, &reg.eo_qword[0]);
+ func(2 * index + 1, B_FALSE, &reg.eo_qword[1]);
+ EFX_AND_OWORD(reg, rsp->mask);
+ EFSYS_BAR_READO(enp->en_esbp, address, &buf, B_TRUE);
+ if (memcmp(&reg, &buf, sizeof (reg))) {
+ rc = EIO;
+ goto fail1;
+ }
+
+ address += rsp->step;
+ }
+
+ ++rsp;
+ --count;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_LOOPBACK
+
+extern void
+efx_loopback_mask(
+ __in efx_loopback_kind_t loopback_kind,
+ __out efx_qword_t *maskp)
+{
+ efx_qword_t mask;
+
+ EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
+ EFSYS_ASSERT(maskp != NULL);
+
+ /* Assert that the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XPORT == EFX_LOOPBACK_XPORT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII_WS == EFX_LOOPBACK_XGMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS == EFX_LOOPBACK_XAUI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_FAR ==
+ EFX_LOOPBACK_XAUI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_WS_NEAR ==
+ EFX_LOOPBACK_XAUI_WS_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_WS == EFX_LOOPBACK_GMII_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS == EFX_LOOPBACK_XFI_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_WS_FAR ==
+ EFX_LOOPBACK_XFI_WS_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS_WS == EFX_LOOPBACK_PHYXS_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT == EFX_LOOPBACK_PMA_INT);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_NEAR == EFX_LOOPBACK_SD_NEAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FAR == EFX_LOOPBACK_SD_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMA_INT_WS ==
+ EFX_LOOPBACK_PMA_INT_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP2_WS ==
+ EFX_LOOPBACK_SD_FEP2_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP1_5_WS ==
+ EFX_LOOPBACK_SD_FEP1_5_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FEP_WS == EFX_LOOPBACK_SD_FEP_WS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SD_FES_WS == EFX_LOOPBACK_SD_FES_WS);
+
+ /* Build bitmask of possible loopback types */
+ EFX_ZERO_QWORD(mask);
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "MAC" grouping has historically been used by drivers to
+ * mean loopbacks supported by on-chip hardware. Keep that
+ * meaning here, and include on-chip PHY layer loopbacks.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR);
+ }
+
+ if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) ||
+ (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
+ /*
+ * The "PHY" grouping has historically been used by drivers to
+ * mean loopbacks supported by off-chip hardware. Keep that
+ * meaning here.
+ */
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS);
+ EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD);
+ }
+
+ *maskp = mask;
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_get_loopback_modes(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
+ MC_CMD_GET_LOOPBACK_MODES_OUT_LEN)];
+ efx_qword_t mask;
+ efx_qword_t modes;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ /*
+ * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
+ * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link().
+ */
+ efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask);
+
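+ /* Restrict to the loopback modes that the MC reports as suggested */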
+ EFX_AND_QWORD(mask,
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED));
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_100FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_1000FDX] = modes;
+
+ modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_10000FDX] = modes;
+
+ if (req.emr_out_length_used >=
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
+ /* Response includes 40G loopback modes */
+ modes =
+ *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_40G);
+ EFX_AND_QWORD(modes, mask);
+ encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
+ }
+
+ EFX_ZERO_QWORD(modes);
+ EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
+ EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
+ encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ __checkReturn efx_rc_t
+efx_nic_calculate_pcie_link_bandwidth(
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t lane_bandwidth;
+ uint32_t total_bandwidth;
+ efx_rc_t rc;
+
+ if ((pcie_link_width == 0) || (pcie_link_width > 16) ||
+ !ISP2(pcie_link_width)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ switch (pcie_link_gen) {
+ case EFX_PCIE_LINK_SPEED_GEN1:
+ /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 2000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN2:
+ /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */
+ lane_bandwidth = 4000;
+ break;
+ case EFX_PCIE_LINK_SPEED_GEN3:
+ /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */
+ lane_bandwidth = 7877;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
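+	/*
+	 * The per-lane rates above already account for the line encoding,
+	 * so usable bandwidth is simply lanes times lane rate. For
+	 * illustration, an x8 Gen3 link gives 8 * 7877 = 63016 Mb/s.
+	 */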
+ total_bandwidth = lane_bandwidth * pcie_link_width;
+ *bandwidth_mbpsp = total_bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+ __checkReturn efx_rc_t
+efx_nic_check_pcie_link_speed(
+ __in efx_nic_t *enp,
+ __in uint32_t pcie_link_width,
+ __in uint32_t pcie_link_gen,
+ __out efx_pcie_link_performance_t *resultp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t bandwidth;
+ efx_pcie_link_performance_t result;
+ efx_rc_t rc;
+
+ if ((encp->enc_required_pcie_bandwidth_mbps == 0) ||
+ (pcie_link_width == 0) || (pcie_link_width == 32) ||
+ (pcie_link_gen == 0)) {
+ /*
+ * No usable info on what is required and/or in use. In virtual
+ * machines, sometimes the PCIe link width is reported as 0 or
+ * 32, or the speed as 0.
+ */
+ result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH;
+ goto out;
+ }
+
+ /* Calculate the available bandwidth in megabits per second */
+ rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width,
+ pcie_link_gen, &bandwidth);
+ if (rc != 0)
+ goto fail1;
+
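+	/*
+	 * For illustration: a device requiring 32000 Mb/s on an x8 Gen2
+	 * link (8 * 4000 = 32000 Mb/s) meets the bandwidth requirement,
+	 * but is reported as SUBOPTIMAL_LATENCY if the device itself
+	 * supports a later PCIe generation than the link negotiated.
+	 */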
+ if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) {
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH;
+ } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) {
+ /* The link provides enough bandwidth but not optimal latency */
+ result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY;
+ } else {
+ result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL;
+ }
+
+out:
+ *resultp = result;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
diff --git a/drivers/net/sfc/base/efx_nvram.c b/drivers/net/sfc/base/efx_nvram.c
new file mode 100644
index 00000000..6ee2a71d
--- /dev/null
+++ b/drivers/net/sfc/base/efx_nvram.c
@@ -0,0 +1,1044 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_SIENA
+
+static const efx_nvram_ops_t __efx_nvram_siena_ops = {
+#if EFSYS_OPT_DIAG
+ siena_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ siena_nvram_type_to_partn, /* envo_type_to_partn */
+ siena_nvram_partn_size, /* envo_partn_size */
+ siena_nvram_partn_rw_start, /* envo_partn_rw_start */
+ siena_nvram_partn_read, /* envo_partn_read */
+ siena_nvram_partn_erase, /* envo_partn_erase */
+ siena_nvram_partn_write, /* envo_partn_write */
+ siena_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ siena_nvram_partn_get_version, /* envo_partn_get_version */
+ siena_nvram_partn_set_version, /* envo_partn_set_version */
+ NULL, /* envo_partn_validate */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_nvram_ops_t __efx_nvram_ef10_ops = {
+#if EFSYS_OPT_DIAG
+ ef10_nvram_test, /* envo_test */
+#endif /* EFSYS_OPT_DIAG */
+ ef10_nvram_type_to_partn, /* envo_type_to_partn */
+ ef10_nvram_partn_size, /* envo_partn_size */
+ ef10_nvram_partn_rw_start, /* envo_partn_rw_start */
+ ef10_nvram_partn_read, /* envo_partn_read */
+ ef10_nvram_partn_erase, /* envo_partn_erase */
+ ef10_nvram_partn_write, /* envo_partn_write */
+ ef10_nvram_partn_rw_finish, /* envo_partn_rw_finish */
+ ef10_nvram_partn_get_version, /* envo_partn_get_version */
+ ef10_nvram_partn_set_version, /* envo_partn_set_version */
+ ef10_nvram_buffer_validate, /* envo_buffer_validate */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_nvram_init(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NVRAM));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ envop = &__efx_nvram_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ envop = &__efx_nvram_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ enp->en_envop = envop;
+ enp->en_mod_flags |= EFX_MOD_NVRAM;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_nvram_test(
+ __in efx_nic_t *enp)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ if ((rc = envop->envo_test(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+ __checkReturn efx_rc_t
+efx_nvram_size(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out size_t *sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, sizep)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ *sizep = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_get_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_get_version(enp, partn,
+ subtypep, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
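+/*
+ * efx_nvram_rw_start() opens a read/write transaction on the partition
+ * backing the given NVRAM type; en_nvram_locked records that type so the
+ * chunked read, write and erase calls below can assert that the caller
+ * holds the transaction, and efx_nvram_rw_finish() closes it again.
+ */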
+ __checkReturn efx_rc_t
+efx_nvram_rw_start(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out_opt size_t *chunk_sizep)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_start(enp, partn, chunk_sizep)) != 0)
+ goto fail2;
+
+ enp->en_nvram_locked = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_read_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_read(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_erase(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ unsigned int offset = 0;
+ size_t size = 0;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_size(enp, partn, &size)) != 0)
+ goto fail2;
+
+ if ((rc = envop->envo_partn_erase(enp, partn, offset, size)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_write_chunk(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in unsigned int offset,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_write(enp, partn, offset, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_rw_finish(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT3U(type, !=, EFX_NVRAM_INVALID);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, type);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_rw_finish(enp, partn)) != 0)
+ goto fail2;
+
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ enp->en_nvram_locked = EFX_NVRAM_INVALID;
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_nvram_set_version(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_ecount(4) uint16_t version[4])
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ /*
+	 * The Siena implementation of envo_partn_set_version() will attempt
+	 * to acquire the NVRAM_UPDATE lock for the DYNAMIC_CONFIG sector.
+	 * Therefore the caller must not already hold the NVRAM_UPDATE lock.
+ */
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+ if ((rc = envop->envo_partn_set_version(enp, partn, version)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Validate buffer contents (before writing to flash) */
+ __checkReturn efx_rc_t
+efx_nvram_validate(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __in_bcount(partn_size) caddr_t partn_data,
+ __in size_t partn_size)
+{
+ const efx_nvram_ops_t *envop = enp->en_envop;
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+
+ if ((rc = envop->envo_type_to_partn(enp, type, &partn)) != 0)
+ goto fail1;
+
+	if (envop->envo_buffer_validate != NULL &&
+ ((rc = envop->envo_buffer_validate(enp, partn,
+ partn_data, partn_size)) != 0))
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+void
+efx_nvram_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NVRAM);
+
+ EFSYS_ASSERT3U(enp->en_nvram_locked, ==, EFX_NVRAM_INVALID);
+
+ enp->en_envop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_NVRAM;
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+/*
+ * Internal MCDI request handling
+ */
+
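+/*
+ * Fetch the list of NVRAM partitions from the MC. On success *npartnp is
+ * the number of partitions and the first npartn uint32_t entries of the
+ * caller's buffer hold the partition type identifiers.
+ */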
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_partitions(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __out unsigned int *npartnp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_PARTITIONS_IN_LEN,
+ MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX)];
+ unsigned int npartn;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_PARTITIONS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_PARTITIONS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ npartn = MCDI_OUT_DWORD(req, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_PARTITIONS_OUT_LEN(npartn)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ if (size < npartn * sizeof (uint32_t)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ *npartnp = npartn;
+
+ memcpy(data,
+ MCDI_OUT2(req, uint32_t, NVRAM_PARTITIONS_OUT_TYPE_ID),
+ (npartn * sizeof (uint32_t)));
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_metadata(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4],
+ __out_bcount_opt(size) char *descp,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_METADATA_IN_LEN,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_METADATA;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_METADATA_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_METADATA_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_METADATA_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_SUBTYPE_VALID)) {
+ *subtypep = MCDI_OUT_DWORD(req, NVRAM_METADATA_OUT_SUBTYPE);
+ } else {
+ *subtypep = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_VERSION_VALID)) {
+ version[0] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_OUT_WORD(req, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = version[1] = version[2] = version[3] = 0;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, NVRAM_METADATA_OUT_FLAGS,
+ NVRAM_METADATA_OUT_DESCRIPTION_VALID)) {
+		/* Return optional description string */
+ if ((descp != NULL) && (size > 0)) {
+ size_t desclen;
+
+ descp[0] = '\0';
+ desclen = (req.emr_out_length_used
+ - MC_CMD_NVRAM_METADATA_OUT_LEN(0));
+
+ EFSYS_ASSERT3U(desclen, <=,
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM);
+
+			if (size < (desclen + 1)) {
+ rc = ENOSPC;
+ goto fail3;
+ }
+
+ memcpy(descp, MCDI_OUT2(req, char,
+ NVRAM_METADATA_OUT_DESCRIPTION),
+ desclen);
+
+ /* Ensure string is NUL terminated */
+ descp[desclen] = '\0';
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_info(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out_opt size_t *sizep,
+ __out_opt uint32_t *addressp,
+ __out_opt uint32_t *erase_sizep,
+ __out_opt uint32_t *write_sizep)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_INFO_IN_LEN,
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_INFO_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_INFO_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_INFO_IN_TYPE, partn);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_INFO_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (sizep)
+ *sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_SIZE);
+
+ if (addressp)
+ *addressp = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_PHYSADDR);
+
+ if (erase_sizep)
+ *erase_sizep = MCDI_OUT_DWORD(req, NVRAM_INFO_OUT_ERASESIZE);
+
+ if (write_sizep) {
+ *write_sizep =
+ (req.emr_out_length_used <
+ MC_CMD_NVRAM_INFO_V2_OUT_LEN) ?
+ 0 : MCDI_OUT_DWORD(req, NVRAM_INFO_V2_OUT_WRITESIZE);
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * MC_CMD_NVRAM_UPDATE_START_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_START_OUT_LEN)];
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_START;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_START_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_START_V2_IN_TYPE, partn);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_START_V2_IN_FLAGS,
+ NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size,
+ __in uint32_t mode)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_READ_IN_V2_LEN,
+ MC_CMD_NVRAM_READ_OUT_LENMAX)];
+ efx_rc_t rc;
+
+ if (size > MC_CMD_NVRAM_READ_OUT_LENMAX) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_READ;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_READ_IN_V2_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_READ_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_LENGTH, size);
+ MCDI_IN_SET_DWORD(req, NVRAM_READ_IN_V2_MODE, mode);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_READ_OUT_LEN(size)) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ memcpy(data,
+ MCDI_OUT2(req, uint8_t, NVRAM_READ_OUT_READ_BUFFER),
+ size);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_ERASE_IN_LEN,
+ MC_CMD_NVRAM_ERASE_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_ERASE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_ERASE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_ERASE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_ERASE_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/*
+ * The NVRAM_WRITE MCDI command is a V1 command and so is supported by both
+ * Siena and EF10 based boards. However, EF10 based boards support the use of
+ * this command with payloads up to the maximum MCDI V2 payload length.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in uint32_t offset,
+	__in_bcount(size)	caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MCDI_CTL_SDU_LEN_MAX_V1,
+ MCDI_CTL_SDU_LEN_MAX_V2)];
+ efx_rc_t rc;
+ size_t max_data_size;
+
+ max_data_size = enp->en_nic_cfg.enc_mcdi_max_payload_length
+ - MC_CMD_NVRAM_WRITE_IN_LEN(0);
+ EFSYS_ASSERT3U(enp->en_nic_cfg.enc_mcdi_max_payload_length, >, 0);
+ EFSYS_ASSERT3U(max_data_size, <,
+ enp->en_nic_cfg.enc_mcdi_max_payload_length);
+
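+	/*
+	 * max_data_size is the MCDI payload limit less the fixed request
+	 * header, so callers must split larger images into chunks of at
+	 * most this many bytes per NVRAM_WRITE.
+	 */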
+ if (size > max_data_size) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_WRITE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_WRITE_IN_LEN(size);
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_WRITE_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_IN_SET_DWORD(req, NVRAM_WRITE_IN_LENGTH, size);
+
+ memcpy(MCDI_IN2(req, uint8_t, NVRAM_WRITE_IN_WRITE_BUFFER),
+ data, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+/*
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2 must be used to support firmware-verified
+ * NVRAM updates. Older firmware will ignore the flags field in the request.
+ */
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_update_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t reboot,
+ __out_opt uint32_t *resultp)
+{
+ const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN,
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)];
+ uint32_t result = 0; /* FIXME: use MC_CMD_NVRAM_VERIFY_RC_UNKNOWN */
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_UPDATE_FINISH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_TYPE, partn);
+ MCDI_IN_SET_DWORD(req, NVRAM_UPDATE_FINISH_V2_IN_REBOOT, reboot);
+
+ MCDI_IN_POPULATE_DWORD_1(req, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+ NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT, 1);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (encp->enc_fw_verified_nvram_update_required == B_FALSE) {
+ /* Report success if verified updates are not supported. */
+ result = MC_CMD_NVRAM_VERIFY_RC_SUCCESS;
+ } else {
+ /* Firmware-verified NVRAM updates are required */
+ if (req.emr_out_length_used <
+ MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+ result =
+ MCDI_OUT_DWORD(req, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+
+ if (result != MC_CMD_NVRAM_VERIFY_RC_SUCCESS) {
+ /* Mandatory verification failed */
+ rc = EINVAL;
+ goto fail3;
+ }
+ }
+
+ if (resultp != NULL)
+ *resultp = result;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Always report verification result */
+ if (resultp != NULL)
+ *resultp = result;
+
+ return (rc);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+efx_mcdi_nvram_test(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TEST_IN_LEN,
+ MC_CMD_NVRAM_TEST_OUT_LEN)];
+ int result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TEST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TEST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TEST_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, NVRAM_TEST_IN_TYPE, partn);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TEST_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ result = MCDI_OUT_DWORD(req, NVRAM_TEST_OUT_RESULT);
+ if (result == MC_CMD_NVRAM_TEST_FAIL) {
+
+ EFSYS_PROBE1(nvram_test_failure, int, partn);
+
+		rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
+#endif /* EFSYS_OPT_NVRAM || EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/efx_phy.c b/drivers/net/sfc/base/efx_phy.c
new file mode 100644
index 00000000..752cd52e
--- /dev/null
+++ b/drivers/net/sfc/base/efx_phy.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+static const efx_phy_ops_t __efx_phy_siena_ops = {
+ siena_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ siena_phy_reconfigure, /* epo_reconfigure */
+ siena_phy_verify, /* epo_verify */
+ siena_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ siena_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ NULL, /* epo_bist_enable_offline */
+ siena_phy_bist_start, /* epo_bist_start */
+ siena_phy_bist_poll, /* epo_bist_poll */
+ siena_phy_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_phy_ops_t __efx_phy_ef10_ops = {
+ ef10_phy_power, /* epo_power */
+ NULL, /* epo_reset */
+ ef10_phy_reconfigure, /* epo_reconfigure */
+ ef10_phy_verify, /* epo_verify */
+ ef10_phy_oui_get, /* epo_oui_get */
+#if EFSYS_OPT_PHY_STATS
+ ef10_phy_stats_update, /* epo_stats_update */
+#endif /* EFSYS_OPT_PHY_STATS */
+#if EFSYS_OPT_BIST
+ ef10_bist_enable_offline, /* epo_bist_enable_offline */
+ ef10_bist_start, /* epo_bist_start */
+ ef10_bist_poll, /* epo_bist_poll */
+ ef10_bist_stop, /* epo_bist_stop */
+#endif /* EFSYS_OPT_BIST */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_phy_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_phy_ops_t *epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_port = encp->enc_port;
+ epp->ep_phy_type = encp->enc_phy_type;
+
+ /* Hook in operations structure */
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ epop = &__efx_phy_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ epop = &__efx_phy_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ epp->ep_epop = epop;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_verify(enp));
+}
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+
+ __checkReturn efx_rc_t
+efx_phy_led_set(
+ __in efx_nic_t *enp,
+ __in efx_phy_led_mode_t mode)
+{
+ efx_nic_cfg_t *encp = (&enp->en_nic_cfg);
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_phy_led_mode == mode)
+ goto done;
+
+ mask = (1 << EFX_PHY_LED_DEFAULT);
+ mask |= encp->enc_led_mask;
+
+ if (!((1 << mode) & mask)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ EFSYS_ASSERT3U(mode, <, EFX_PHY_LED_NMODES);
+ epp->ep_phy_led_mode = mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ void
+efx_phy_adv_cap_get(
+ __in efx_nic_t *enp,
+ __in uint32_t flag,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+
+ switch (flag) {
+ case EFX_PHY_CAP_CURRENT:
+ *maskp = epp->ep_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_DEFAULT:
+ *maskp = epp->ep_default_adv_cap_mask;
+ break;
+ case EFX_PHY_CAP_PERM:
+ *maskp = epp->ep_phy_cap_mask;
+ break;
+ default:
+ EFSYS_ASSERT(B_FALSE);
+ break;
+ }
+}
+
+ __checkReturn efx_rc_t
+efx_phy_adv_cap_set(
+ __in efx_nic_t *enp,
+ __in uint32_t mask)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ uint32_t old_mask;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if ((mask & ~epp->ep_phy_cap_mask) != 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_adv_cap_mask == mask)
+ goto done;
+
+ old_mask = epp->ep_adv_cap_mask;
+ epp->ep_adv_cap_mask = mask;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail2;
+
+done:
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ epp->ep_adv_cap_mask = old_mask;
+ /* Reconfigure for robustness */
+ if (epop->epo_reconfigure(enp) != 0) {
+ /*
+ * We may have an inconsistent view of our advertised speed
+ * capabilities.
+ */
+ EFSYS_ASSERT(0);
+ }
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_phy_lp_cap_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *maskp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ *maskp = epp->ep_lp_cap_mask;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_oui_get(enp, ouip));
+}
+
+ void
+efx_phy_media_type_get(
+ __in efx_nic_t *enp,
+ __out efx_phy_media_type_t *typep)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ if (epp->ep_module_type != EFX_PHY_MEDIA_INVALID)
+ *typep = epp->ep_module_type;
+ else
+ *typep = epp->ep_fixed_port_type;
+}
+
+ __checkReturn efx_rc_t
+efx_phy_module_get_info(
+ __in efx_nic_t *enp,
+ __in uint8_t dev_addr,
+ __in uint8_t offset,
+ __in uint8_t len,
+ __out_bcount(len) uint8_t *data)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(data != NULL);
+
+ if ((uint32_t)offset + len > 0xff) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_phy_module_get_info(enp, dev_addr,
+ offset, len, data)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#if EFSYS_OPT_NAMES
+
+/* START MKCONFIG GENERATED PhyStatNamesBlock af9ffa24da3bc100 */
+static const char * const __efx_phy_stat_name[] = {
+ "oui",
+ "pma_pmd_link_up",
+ "pma_pmd_rx_fault",
+ "pma_pmd_tx_fault",
+ "pma_pmd_rev_a",
+ "pma_pmd_rev_b",
+ "pma_pmd_rev_c",
+ "pma_pmd_rev_d",
+ "pcs_link_up",
+ "pcs_rx_fault",
+ "pcs_tx_fault",
+ "pcs_ber",
+ "pcs_block_errors",
+ "phy_xs_link_up",
+ "phy_xs_rx_fault",
+ "phy_xs_tx_fault",
+ "phy_xs_align",
+ "phy_xs_sync_a",
+ "phy_xs_sync_b",
+ "phy_xs_sync_c",
+ "phy_xs_sync_d",
+ "an_link_up",
+ "an_master",
+ "an_local_rx_ok",
+ "an_remote_rx_ok",
+ "cl22ext_link_up",
+ "snr_a",
+ "snr_b",
+ "snr_c",
+ "snr_d",
+ "pma_pmd_signal_a",
+ "pma_pmd_signal_b",
+ "pma_pmd_signal_c",
+ "pma_pmd_signal_d",
+ "an_complete",
+ "pma_pmd_rev_major",
+ "pma_pmd_rev_minor",
+ "pma_pmd_rev_micro",
+ "pcs_fw_version_0",
+ "pcs_fw_version_1",
+ "pcs_fw_version_2",
+ "pcs_fw_version_3",
+ "pcs_fw_build_yy",
+ "pcs_fw_build_mm",
+ "pcs_fw_build_dd",
+ "pcs_op_mode",
+};
+
+/* END MKCONFIG GENERATED PhyStatNamesBlock */
+
+ const char *
+efx_phy_stat_name(
+ __in efx_nic_t *enp,
+ __in efx_phy_stat_t type)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_PHY_NSTATS);
+
+ return (__efx_phy_stat_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+ __checkReturn efx_rc_t
+efx_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ return (epop->epo_stats_update(enp, esmp, stat));
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+efx_bist_enable_offline(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ if (epop->epo_bist_enable_offline == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_enable_offline(enp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+
+}
+
+ __checkReturn efx_rc_t
+efx_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, EFX_BIST_TYPE_UNKNOWN);
+
+ if (epop->epo_bist_start == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_start(enp, type)) != 0)
+ goto fail2;
+
+ epp->ep_current_bist = type;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt uint32_t *value_maskp,
+ __out_ecount_opt(count) unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_poll != NULL);
+ if (epop->epo_bist_poll == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = epop->epo_bist_poll(enp, type, resultp, value_maskp,
+ valuesp, count)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(type, !=, EFX_BIST_TYPE_UNKNOWN);
+ EFSYS_ASSERT3U(type, <, EFX_BIST_TYPE_NTYPES);
+ EFSYS_ASSERT3U(epp->ep_current_bist, ==, type);
+
+ EFSYS_ASSERT(epop->epo_bist_stop != NULL);
+
+ if (epop->epo_bist_stop != NULL)
+ epop->epo_bist_stop(enp, type);
+
+ epp->ep_current_bist = EFX_BIST_TYPE_UNKNOWN;
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+ void
+efx_phy_unprobe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ epp->ep_epop = NULL;
+
+ epp->ep_adv_cap_mask = 0;
+
+ epp->ep_port = 0;
+ epp->ep_phy_type = 0;
+}
diff --git a/drivers/net/sfc/base/efx_phy_ids.h b/drivers/net/sfc/base/efx_phy_ids.h
new file mode 100644
index 00000000..9d9a0f90
--- /dev/null
+++ b/drivers/net/sfc/base/efx_phy_ids.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_PHY_IDS_H
+#define _SYS_EFX_PHY_IDS_H
+
+#define EFX_PHY_NULL 0
+
+typedef enum efx_phy_type_e { /* GENERATED BY scripts/genfwdef */
+ EFX_PHY_TXC43128 = 1,
+ EFX_PHY_SFX7101 = 3,
+ EFX_PHY_QT2022C2 = 4,
+ EFX_PHY_PM8358 = 6,
+ EFX_PHY_SFT9001A = 8,
+ EFX_PHY_QT2025C = 9,
+ EFX_PHY_SFT9001B = 10,
+ EFX_PHY_QLX111V = 12,
+ EFX_PHY_QT2025_KR = 17,
+ EFX_PHY_AEL3020 = 18,
+ EFX_PHY_XFI_FARMI = 19,
+} efx_phy_type_t;
+
+
+#endif /* _SYS_EFX_PHY_IDS_H */
diff --git a/drivers/net/sfc/base/efx_port.c b/drivers/net/sfc/base/efx_port.c
new file mode 100644
index 00000000..518c2a22
--- /dev/null
+++ b/drivers/net/sfc/base/efx_port.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_port_init(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (enp->en_mod_flags & EFX_MOD_PORT) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ enp->en_mod_flags |= EFX_MOD_PORT;
+
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_link_mode = EFX_LINK_UNKNOWN;
+ epp->ep_mac_drain = B_TRUE;
+
+ /* Configure the MAC */
+ if ((rc = efx_mac_select(enp)) != 0)
+ goto fail1;
+
+ epp->ep_emop->emo_reconfigure(enp);
+
+	/* Pick up current phy capabilities */
+ efx_port_poll(enp, NULL);
+
+ /*
+ * Turn on the PHY if available, otherwise reset it, and
+ * reconfigure it with the current configuration.
+ */
+ if (epop->epo_power != NULL) {
+ if ((rc = epop->epo_power(enp, B_TRUE)) != 0)
+ goto fail2;
+ } else {
+ if ((rc = epop->epo_reset(enp)) != 0)
+ goto fail2;
+ }
+
+ EFSYS_ASSERT(enp->en_reset_flags & EFX_RESET_PHY);
+ enp->en_reset_flags &= ~EFX_RESET_PHY;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_port_poll(
+ __in efx_nic_t *enp,
+ __out_opt efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_link_mode_t ignore_link_mode;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(emop != NULL);
+ EFSYS_ASSERT(!epp->ep_mac_stats_pending);
+
+ if (link_modep == NULL)
+ link_modep = &ignore_link_mode;
+
+ if ((rc = emop->emo_poll(enp, link_modep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
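+/*
+ * The per-link-mode masks consulted here (enc_loopback_types[]) are the
+ * ones built from the firmware's GET_LOOPBACK_MODES response, so only
+ * loopback types the firmware advertises for the requested link mode
+ * are accepted.
+ */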
+ __checkReturn efx_rc_t
+efx_port_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ const efx_mac_ops_t *emop = epp->ep_emop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+ EFSYS_ASSERT(emop != NULL);
+
+ EFSYS_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ if (EFX_TEST_QWORD_BIT(encp->enc_loopback_types[link_mode],
+ loopback_type) == 0) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (epp->ep_loopback_type == loopback_type &&
+ epp->ep_loopback_link_mode == link_mode)
+ return (0);
+
+ if ((rc = emop->emo_loopback_set(enp, link_mode, loopback_type)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_NAMES
+
+static const char * const __efx_loopback_type_name[] = {
+ "OFF",
+ "DATA",
+ "GMAC",
+ "XGMII",
+ "XGXS",
+ "XAUI",
+ "GMII",
+ "SGMII",
+ "XGBR",
+ "XFI",
+ "XAUI_FAR",
+ "GMII_FAR",
+ "SGMII_FAR",
+ "XFI_FAR",
+ "GPHY",
+ "PHY_XS",
+ "PCS",
+ "PMA_PMD",
+ "XPORT",
+ "XGMII_WS",
+ "XAUI_WS",
+ "XAUI_WS_FAR",
+ "XAUI_WS_NEAR",
+ "GMII_WS",
+ "XFI_WS",
+ "XFI_WS_FAR",
+ "PHYXS_WS",
+ "PMA_INT",
+ "SD_NEAR",
+ "SD_FAR",
+ "PMA_INT_WS",
+ "SD_FEP2_WS",
+ "SD_FEP1_5_WS",
+ "SD_FEP_WS",
+ "SD_FES_WS",
+};
+
+ __checkReturn const char *
+efx_loopback_type_name(
+ __in efx_nic_t *enp,
+ __in efx_loopback_type_t type)
+{
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__efx_loopback_type_name) ==
+ EFX_LOOPBACK_NTYPES);
+
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(type, <, EFX_LOOPBACK_NTYPES);
+
+ return (__efx_loopback_type_name[type]);
+}
+
+#endif /* EFSYS_OPT_NAMES */
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ void
+efx_port_fini(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PORT);
+
+ EFSYS_ASSERT(epp->ep_mac_drain);
+
+ epp->ep_emop = NULL;
+ epp->ep_mac_type = EFX_MAC_INVALID;
+ epp->ep_mac_drain = B_FALSE;
+
+ /* Turn off the PHY */
+ if (epop->epo_power != NULL)
+ (void) epop->epo_power(enp, B_FALSE);
+
+ enp->en_mod_flags &= ~EFX_MOD_PORT;
+}
diff --git a/drivers/net/sfc/base/efx_regs.h b/drivers/net/sfc/base/efx_regs.h
new file mode 100644
index 00000000..a1a7f9da
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs.h
@@ -0,0 +1,3870 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_H
+#define _SYS_EFX_REGS_H
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
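+/*
+ * Naming convention used below: _OFST gives a register offset, _LBN the
+ * lowest bit number of a field within the register, _WIDTH the field
+ * width in bits, and FFE_ prefixed values enumerate field encodings.
+ */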
+/*
+ * FR_AB_EE_VPD_CFG0_REG_SF(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_SF_OFST 0x00000300
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_EE_VPD_CFG0_REG(128bit):
+ * SPI/VPD configuration register 0
+ */
+#define FR_AB_EE_VPD_CFG0_REG_OFST 0x00000140
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPDW_BASE_LBN 64
+#define FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define FRF_AB_EE_VPD_BASE_LBN 32
+#define FRF_AB_EE_VPD_BASE_WIDTH 24
+#define FRF_AB_EE_VPD_LENGTH_LBN 16
+#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define FRF_AB_EE_VPD_EN_LBN 0
+#define FRF_AB_EE_VPD_EN_WIDTH 1
+
+
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG_SF(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_SF_OFST 0x00000320
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL0123_REG(128bit):
+ * PCIE SerDes control register 0 to 3
+ */
+#define FR_AB_PCIE_SD_CTL0123_REG_OFST 0x00000320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define FRF_AB_PCIE_OFFSET_LBN 56
+#define FRF_AB_PCIE_OFFSET_WIDTH 8
+#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_H_LBN 51
+#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define FRF_AB_PCIE_PARRESET_L_LBN 50
+#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define FRF_AB_PCIE_LPBK_LBN 40
+#define FRF_AB_PCIE_LPBK_WIDTH 8
+#define FRF_AB_PCIE_PARLPBK_LBN 32
+#define FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define FFE_AB_PCIE_RXEQCTL_OFF 2
+#define FFE_AB_PCIE_RXEQCTL_MIN 1
+#define FFE_AB_PCIE_RXEQCTL_MAX 0
+#define FRF_AB_PCIE_HIDRV_LBN 8
+#define FRF_AB_PCIE_HIDRV_WIDTH 8
+#define FRF_AB_PCIE_LODRV_LBN 0
+#define FRF_AB_PCIE_LODRV_WIDTH 8
+
+
+/*
+ * FR_AB_PCIE_SD_CTL45_REG_SF(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_SF_OFST 0x00000330
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_SD_CTL45_REG(128bit):
+ * PCIE SerDes control register 4 and 5
+ */
+#define FR_AB_PCIE_SD_CTL45_REG_OFST 0x00000330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_DTX7_LBN 60
+#define FRF_AB_PCIE_DTX7_WIDTH 4
+#define FRF_AB_PCIE_DTX6_LBN 56
+#define FRF_AB_PCIE_DTX6_WIDTH 4
+#define FRF_AB_PCIE_DTX5_LBN 52
+#define FRF_AB_PCIE_DTX5_WIDTH 4
+#define FRF_AB_PCIE_DTX4_LBN 48
+#define FRF_AB_PCIE_DTX4_WIDTH 4
+#define FRF_AB_PCIE_DTX3_LBN 44
+#define FRF_AB_PCIE_DTX3_WIDTH 4
+#define FRF_AB_PCIE_DTX2_LBN 40
+#define FRF_AB_PCIE_DTX2_WIDTH 4
+#define FRF_AB_PCIE_DTX1_LBN 36
+#define FRF_AB_PCIE_DTX1_WIDTH 4
+#define FRF_AB_PCIE_DTX0_LBN 32
+#define FRF_AB_PCIE_DTX0_WIDTH 4
+#define FRF_AB_PCIE_DEQ7_LBN 28
+#define FRF_AB_PCIE_DEQ7_WIDTH 4
+#define FRF_AB_PCIE_DEQ6_LBN 24
+#define FRF_AB_PCIE_DEQ6_WIDTH 4
+#define FRF_AB_PCIE_DEQ5_LBN 20
+#define FRF_AB_PCIE_DEQ5_WIDTH 4
+#define FRF_AB_PCIE_DEQ4_LBN 16
+#define FRF_AB_PCIE_DEQ4_WIDTH 4
+#define FRF_AB_PCIE_DEQ3_LBN 12
+#define FRF_AB_PCIE_DEQ3_WIDTH 4
+#define FRF_AB_PCIE_DEQ2_LBN 8
+#define FRF_AB_PCIE_DEQ2_WIDTH 4
+#define FRF_AB_PCIE_DEQ1_LBN 4
+#define FRF_AB_PCIE_DEQ1_WIDTH 4
+#define FRF_AB_PCIE_DEQ0_LBN 0
+#define FRF_AB_PCIE_DEQ0_WIDTH 4
+
+
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG_SF(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_SF_OFST 0x00000340
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_PCIE_PCS_CTL_STAT_REG(128bit):
+ * PCIE PCS control and status register
+ */
+#define FR_AB_PCIE_PCS_CTL_STAT_REG_OFST 0x00000340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define FRF_AB_PCIE_PRBSERR_LBN 40
+#define FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define FRF_AB_PCIE_PRBSSEL_LBN 0
+#define FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+
+/*
+ * FR_AB_HW_INIT_REG_SF(128bit):
+ * Hardware initialization register
+ */
+#define FR_AB_HW_INIT_REG_SF_OFST 0x00000350
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_HW_INIT_REG(128bit):
+ * Hardware initialization register
+ */
+#define FR_AZ_HW_INIT_REG_OFST 0x000000c0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define FRF_CZ_TX_MRG_TAGS_LBN 120
+#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define FRF_AZ_TRGT_MASK_ALL_LBN 100
+#define FRF_AZ_TRGT_MASK_ALL_WIDTH 1
+#define FRF_AZ_DOORBELL_DROP_LBN 92
+#define FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define FRF_AB_PE_EIDLE_DIS_LBN 75
+#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define FRF_AZ_FC_BLOCKING_EN_LBN 45
+#define FRF_AZ_FC_BLOCKING_EN_WIDTH 1
+#define FRF_AZ_B2B_REQ_EN_LBN 44
+#define FRF_AZ_B2B_REQ_EN_WIDTH 1
+#define FRF_AZ_POST_WR_MASK_LBN 40
+#define FRF_AZ_POST_WR_MASK_WIDTH 4
+#define FRF_AZ_TLP_TC_LBN 34
+#define FRF_AZ_TLP_TC_WIDTH 3
+#define FRF_AZ_TLP_ATTR_LBN 32
+#define FRF_AZ_TLP_ATTR_WIDTH 2
+#define FRF_AB_INTB_VEC_LBN 24
+#define FRF_AB_INTB_VEC_WIDTH 5
+#define FRF_AB_INTA_VEC_LBN 16
+#define FRF_AB_INTA_VEC_WIDTH 5
+#define FRF_AZ_WD_TIMER_LBN 8
+#define FRF_AZ_WD_TIMER_WIDTH 8
+#define FRF_AZ_US_DISABLE_LBN 5
+#define FRF_AZ_US_DISABLE_WIDTH 1
+#define FRF_AZ_TLP_EP_LBN 4
+#define FRF_AZ_TLP_EP_WIDTH 1
+#define FRF_AZ_ATTR_SEL_LBN 3
+#define FRF_AZ_ATTR_SEL_WIDTH 1
+#define FRF_AZ_TD_SEL_LBN 1
+#define FRF_AZ_TD_SEL_WIDTH 1
+#define FRF_AZ_TLP_TD_LBN 0
+#define FRF_AZ_TLP_TD_WIDTH 1
+
+
+/*
+ * FR_AB_NIC_STAT_REG_SF(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_SF_OFST 0x00000360
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_NIC_STAT_REG(128bit):
+ * NIC status register
+ */
+#define FR_AB_NIC_STAT_REG_OFST 0x00000200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_BB_AER_DIS_LBN 34
+#define FRF_BB_AER_DIS_WIDTH 1
+#define FRF_BB_EE_STRAP_EN_LBN 31
+#define FRF_BB_EE_STRAP_EN_WIDTH 1
+#define FRF_BB_EE_STRAP_LBN 24
+#define FRF_BB_EE_STRAP_WIDTH 4
+#define FRF_BB_REVISION_ID_LBN 17
+#define FRF_BB_REVISION_ID_WIDTH 7
+#define FRF_AB_ONCHIP_SRAM_LBN 16
+#define FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define FRF_AB_SF_PRST_LBN 9
+#define FRF_AB_SF_PRST_WIDTH 1
+#define FRF_AB_EE_PRST_LBN 8
+#define FRF_AB_EE_PRST_WIDTH 1
+#define FRF_AB_ATE_MODE_LBN 3
+#define FRF_AB_ATE_MODE_WIDTH 1
+#define FRF_AB_STRAP_PINS_LBN 0
+#define FRF_AB_STRAP_PINS_WIDTH 3
+
+
+/*
+ * FR_AB_GLB_CTL_REG_SF(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_SF_OFST 0x00000370
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AB_GLB_CTL_REG(128bit):
+ * Global control register
+ */
+#define FR_AB_GLB_CTL_REG_OFST 0x00000220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define FRF_AA_PCIX_RST_CTL_LBN 60
+#define FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define FRF_BB_BIU_RST_CTL_LBN 60
+#define FRF_BB_BIU_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define FRF_AB_XGRX_RST_CTL_LBN 56
+#define FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define FRF_AB_XGTX_RST_CTL_LBN 55
+#define FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define FRF_AB_EM_RST_CTL_LBN 54
+#define FRF_AB_EM_RST_CTL_WIDTH 1
+#define FRF_AB_EV_RST_CTL_LBN 53
+#define FRF_AB_EV_RST_CTL_WIDTH 1
+#define FRF_AB_SR_RST_CTL_LBN 52
+#define FRF_AB_SR_RST_CTL_WIDTH 1
+#define FRF_AB_RX_RST_CTL_LBN 51
+#define FRF_AB_RX_RST_CTL_WIDTH 1
+#define FRF_AB_TX_RST_CTL_LBN 50
+#define FRF_AB_TX_RST_CTL_WIDTH 1
+#define FRF_AB_EE_RST_CTL_LBN 49
+#define FRF_AB_EE_RST_CTL_WIDTH 1
+#define FRF_AB_CS_RST_CTL_LBN 48
+#define FRF_AB_CS_RST_CTL_WIDTH 1
+#define FRF_AB_HOT_RST_CTL_LBN 40
+#define FRF_AB_HOT_RST_CTL_WIDTH 2
+#define FRF_AB_RST_EXT_PHY_LBN 31
+#define FRF_AB_RST_EXT_PHY_WIDTH 1
+#define FRF_AB_RST_XAUI_SD_LBN 30
+#define FRF_AB_RST_XAUI_SD_WIDTH 1
+#define FRF_AB_RST_PCIE_SD_LBN 29
+#define FRF_AB_RST_PCIE_SD_WIDTH 1
+#define FRF_AA_RST_PCIX_LBN 28
+#define FRF_AA_RST_PCIX_WIDTH 1
+#define FRF_BB_RST_BIU_LBN 28
+#define FRF_BB_RST_BIU_WIDTH 1
+#define FRF_AB_RST_PCIE_STKY_LBN 27
+#define FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define FRF_AB_RST_PCIE_CORE_LBN 25
+#define FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define FRF_AB_RST_XGRX_LBN 24
+#define FRF_AB_RST_XGRX_WIDTH 1
+#define FRF_AB_RST_XGTX_LBN 23
+#define FRF_AB_RST_XGTX_WIDTH 1
+#define FRF_AB_RST_EM_LBN 22
+#define FRF_AB_RST_EM_WIDTH 1
+#define FRF_AB_RST_EV_LBN 21
+#define FRF_AB_RST_EV_WIDTH 1
+#define FRF_AB_RST_SR_LBN 20
+#define FRF_AB_RST_SR_WIDTH 1
+#define FRF_AB_RST_RX_LBN 19
+#define FRF_AB_RST_RX_WIDTH 1
+#define FRF_AB_RST_TX_LBN 18
+#define FRF_AB_RST_TX_WIDTH 1
+#define FRF_AB_RST_SF_LBN 17
+#define FRF_AB_RST_SF_WIDTH 1
+#define FRF_AB_RST_CS_LBN 16
+#define FRF_AB_RST_CS_WIDTH 1
+#define FRF_AB_INT_RST_DUR_LBN 4
+#define FRF_AB_INT_RST_DUR_WIDTH 3
+#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define FRF_AB_SWRST_LBN 0
+#define FRF_AB_SWRST_WIDTH 1
+
+
+/*
+ * FR_AZ_IOM_IND_ADR_REG(32bit):
+ * IO-mapped indirect access address register
+ */
+#define FR_AZ_IOM_IND_ADR_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_LBN 24
+#define FRF_AZ_IOM_AUTO_ADR_INC_EN_WIDTH 1
+#define FRF_AZ_IOM_IND_ADR_LBN 0
+#define FRF_AZ_IOM_IND_ADR_WIDTH 24
+
+
+/*
+ * FR_AZ_IOM_IND_DAT_REG(32bit):
+ * IO-mapped indirect access data register
+ */
+#define FR_AZ_IOM_IND_DAT_REG_OFST 0x00000004
+/* falcona0,falconb0,sienaa0=net_func_bar0 */
+
+#define FRF_AZ_IOM_IND_DAT_LBN 0
+#define FRF_AZ_IOM_IND_DAT_WIDTH 32
+
+
+/*
+ * FR_AZ_ADR_REGION_REG(128bit):
+ * Address region register
+ */
+#define FR_AZ_ADR_REGION_REG_OFST 0x00000000
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ADR_REGION3_LBN 96
+#define FRF_AZ_ADR_REGION3_WIDTH 18
+#define FRF_AZ_ADR_REGION2_LBN 64
+#define FRF_AZ_ADR_REGION2_WIDTH 18
+#define FRF_AZ_ADR_REGION1_LBN 32
+#define FRF_AZ_ADR_REGION1_WIDTH 18
+#define FRF_AZ_ADR_REGION0_LBN 0
+#define FRF_AZ_ADR_REGION0_WIDTH 18
+
+
+/*
+ * FR_AZ_INT_EN_REG_KER(128bit):
+ * Kernel driver Interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_KER_OFST 0x00000010
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_KER_INT_CHAR_LBN 4
+#define FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define FRF_AZ_KER_INT_KER_LBN 3
+#define FRF_AZ_KER_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_EN_REG_CHAR(128bit):
+ * Char Driver interrupt enable register
+ */
+#define FR_AZ_INT_EN_REG_CHAR_OFST 0x00000020
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_CHAR_INT_LEVE_SEL_LBN 8
+#define FRF_AZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define FRF_AZ_CHAR_INT_CHAR_LBN 4
+#define FRF_AZ_CHAR_INT_CHAR_WIDTH 1
+#define FRF_AZ_CHAR_INT_KER_LBN 3
+#define FRF_AZ_CHAR_INT_KER_WIDTH 1
+#define FRF_AZ_DRV_INT_EN_CHAR_LBN 0
+#define FRF_AZ_DRV_INT_EN_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_INT_ADR_REG_KER(128bit):
+ * Interrupt host address for Kernel driver
+ */
+#define FR_AZ_INT_ADR_REG_KER_OFST 0x00000030
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define FRF_AZ_INT_ADR_KER_LBN 0
+#define FRF_AZ_INT_ADR_KER_WIDTH 64
+#define FRF_AZ_INT_ADR_KER_DW0_LBN 0
+#define FRF_AZ_INT_ADR_KER_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_KER_DW1_LBN 32
+#define FRF_AZ_INT_ADR_KER_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_INT_ADR_REG_CHAR(128bit):
+ * Interrupt host address for Char driver
+ */
+#define FR_AZ_INT_ADR_REG_CHAR_OFST 0x00000040
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define FRF_AZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define FRF_AZ_INT_ADR_CHAR_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_WIDTH 64
+#define FRF_AZ_INT_ADR_CHAR_DW0_LBN 0
+#define FRF_AZ_INT_ADR_CHAR_DW0_WIDTH 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_LBN 32
+#define FRF_AZ_INT_ADR_CHAR_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_KER(32bit):
+ * Kernel interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_KER_OFST 0x00000050
+/* falcona0=net_func_bar2 */
+
+#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+
+/*
+ * FR_BZ_INT_ISR0_REG(128bit):
+ * Function 0 Interrupt Acknowledge Status register
+ */
+#define FR_BZ_INT_ISR0_REG_OFST 0x00000090
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_INT_ISR_REG_LBN 0
+#define FRF_BZ_INT_ISR_REG_WIDTH 64
+#define FRF_BZ_INT_ISR_REG_DW0_LBN 0
+#define FRF_BZ_INT_ISR_REG_DW0_WIDTH 32
+#define FRF_BZ_INT_ISR_REG_DW1_LBN 32
+#define FRF_BZ_INT_ISR_REG_DW1_WIDTH 32
+
+
+/*
+ * FR_AB_EE_SPI_HCMD_REG(128bit):
+ * SPI host command register
+ */
+#define FR_AB_EE_SPI_HCMD_REG_OFST 0x00000100
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+
+/*
+ * FR_CZ_USR_EV_CFG(32bit):
+ * User Level Event Configuration register
+ */
+#define FR_CZ_USR_EV_CFG_OFST 0x00000100
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_USREV_DIS_LBN 16
+#define FRF_CZ_USREV_DIS_WIDTH 1
+#define FRF_CZ_DFLT_EVQ_LBN 0
+#define FRF_CZ_DFLT_EVQ_WIDTH 10
+
+
+/*
+ * FR_AB_EE_SPI_HADR_REG(128bit):
+ * SPI host address register
+ */
+#define FR_AB_EE_SPI_HADR_REG_OFST 0x00000110
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+
+/*
+ * FR_AB_EE_SPI_HDATA_REG(128bit):
+ * SPI host data register
+ */
+#define FR_AB_EE_SPI_HDATA_REG_OFST 0x00000120
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_SPI_HDATA3_LBN 96
+#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA2_LBN 64
+#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA1_LBN 32
+#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define FRF_AB_EE_SPI_HDATA0_LBN 0
+#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+
+/*
+ * FR_AB_EE_BASE_PAGE_REG(128bit):
+ * Expansion ROM base mirror register
+ */
+#define FR_AB_EE_BASE_PAGE_REG_OFST 0x00000130
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_EXPROM_MASK_LBN 16
+#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+
+/*
+ * FR_AB_EE_VPD_SW_CNTL_REG(128bit):
+ * VPD access SW control register
+ */
+#define FR_AB_EE_VPD_SW_CNTL_REG_OFST 0x00000150
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+
+/*
+ * FR_AB_EE_VPD_SW_DATA_REG(128bit):
+ * VPD access SW data register
+ */
+#define FR_AB_EE_VPD_SW_DATA_REG_OFST 0x00000160
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+
+/*
+ * FR_BB_PCIE_CORE_INDIRECT_REG(64bit):
+ * Indirect Access to PCIE Core registers
+ */
+#define FR_BB_PCIE_CORE_INDIRECT_REG_OFST 0x000001f0
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+
+/*
+ * FR_AB_GPIO_CTL_REG(128bit):
+ * GPIO control register
+ */
+#define FR_AB_GPIO_CTL_REG_OFST 0x00000210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GPIO15_OEN_LBN 63
+#define FRF_AB_GPIO15_OEN_WIDTH 1
+#define FRF_AB_GPIO14_OEN_LBN 62
+#define FRF_AB_GPIO14_OEN_WIDTH 1
+#define FRF_AB_GPIO13_OEN_LBN 61
+#define FRF_AB_GPIO13_OEN_WIDTH 1
+#define FRF_AB_GPIO12_OEN_LBN 60
+#define FRF_AB_GPIO12_OEN_WIDTH 1
+#define FRF_AB_GPIO11_OEN_LBN 59
+#define FRF_AB_GPIO11_OEN_WIDTH 1
+#define FRF_AB_GPIO10_OEN_LBN 58
+#define FRF_AB_GPIO10_OEN_WIDTH 1
+#define FRF_AB_GPIO9_OEN_LBN 57
+#define FRF_AB_GPIO9_OEN_WIDTH 1
+#define FRF_AB_GPIO8_OEN_LBN 56
+#define FRF_AB_GPIO8_OEN_WIDTH 1
+#define FRF_AB_GPIO15_OUT_LBN 55
+#define FRF_AB_GPIO15_OUT_WIDTH 1
+#define FRF_AB_GPIO14_OUT_LBN 54
+#define FRF_AB_GPIO14_OUT_WIDTH 1
+#define FRF_AB_GPIO13_OUT_LBN 53
+#define FRF_AB_GPIO13_OUT_WIDTH 1
+#define FRF_AB_GPIO12_OUT_LBN 52
+#define FRF_AB_GPIO12_OUT_WIDTH 1
+#define FRF_AB_GPIO11_OUT_LBN 51
+#define FRF_AB_GPIO11_OUT_WIDTH 1
+#define FRF_AB_GPIO10_OUT_LBN 50
+#define FRF_AB_GPIO10_OUT_WIDTH 1
+#define FRF_AB_GPIO9_OUT_LBN 49
+#define FRF_AB_GPIO9_OUT_WIDTH 1
+#define FRF_AB_GPIO8_OUT_LBN 48
+#define FRF_AB_GPIO8_OUT_WIDTH 1
+#define FRF_AB_GPIO15_IN_LBN 47
+#define FRF_AB_GPIO15_IN_WIDTH 1
+#define FRF_AB_GPIO14_IN_LBN 46
+#define FRF_AB_GPIO14_IN_WIDTH 1
+#define FRF_AB_GPIO13_IN_LBN 45
+#define FRF_AB_GPIO13_IN_WIDTH 1
+#define FRF_AB_GPIO12_IN_LBN 44
+#define FRF_AB_GPIO12_IN_WIDTH 1
+#define FRF_AB_GPIO11_IN_LBN 43
+#define FRF_AB_GPIO11_IN_WIDTH 1
+#define FRF_AB_GPIO10_IN_LBN 42
+#define FRF_AB_GPIO10_IN_WIDTH 1
+#define FRF_AB_GPIO9_IN_LBN 41
+#define FRF_AB_GPIO9_IN_WIDTH 1
+#define FRF_AB_GPIO8_IN_LBN 40
+#define FRF_AB_GPIO8_IN_WIDTH 1
+#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define FRF_BB_CLK156_OUT_EN_LBN 31
+#define FRF_BB_CLK156_OUT_EN_WIDTH 1
+#define FRF_BB_USE_NIC_CLK_LBN 30
+#define FRF_BB_USE_NIC_CLK_WIDTH 1
+#define FRF_AB_GPIO5_OEN_LBN 29
+#define FRF_AB_GPIO5_OEN_WIDTH 1
+#define FRF_AB_GPIO4_OEN_LBN 28
+#define FRF_AB_GPIO4_OEN_WIDTH 1
+#define FRF_AB_GPIO3_OEN_LBN 27
+#define FRF_AB_GPIO3_OEN_WIDTH 1
+#define FRF_AB_GPIO2_OEN_LBN 26
+#define FRF_AB_GPIO2_OEN_WIDTH 1
+#define FRF_AB_GPIO1_OEN_LBN 25
+#define FRF_AB_GPIO1_OEN_WIDTH 1
+#define FRF_AB_GPIO0_OEN_LBN 24
+#define FRF_AB_GPIO0_OEN_WIDTH 1
+#define FRF_AB_GPIO5_OUT_LBN 21
+#define FRF_AB_GPIO5_OUT_WIDTH 1
+#define FRF_AB_GPIO4_OUT_LBN 20
+#define FRF_AB_GPIO4_OUT_WIDTH 1
+#define FRF_AB_GPIO3_OUT_LBN 19
+#define FRF_AB_GPIO3_OUT_WIDTH 1
+#define FRF_AB_GPIO2_OUT_LBN 18
+#define FRF_AB_GPIO2_OUT_WIDTH 1
+#define FRF_AB_GPIO1_OUT_LBN 17
+#define FRF_AB_GPIO1_OUT_WIDTH 1
+#define FRF_AB_GPIO0_OUT_LBN 16
+#define FRF_AB_GPIO0_OUT_WIDTH 1
+#define FRF_AB_GPIO5_IN_LBN 13
+#define FRF_AB_GPIO5_IN_WIDTH 1
+#define FRF_AB_GPIO4_IN_LBN 12
+#define FRF_AB_GPIO4_IN_WIDTH 1
+#define FRF_AB_GPIO3_IN_LBN 11
+#define FRF_AB_GPIO3_IN_WIDTH 1
+#define FRF_AB_GPIO2_IN_LBN 10
+#define FRF_AB_GPIO2_IN_WIDTH 1
+#define FRF_AB_GPIO1_IN_LBN 9
+#define FRF_AB_GPIO1_IN_WIDTH 1
+#define FRF_AB_GPIO0_IN_LBN 8
+#define FRF_AB_GPIO0_IN_WIDTH 1
+#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_KER(128bit):
+ * Fatal interrupt register for Kernel
+ */
+#define FR_AZ_FATAL_INTR_REG_KER_OFST 0x00000230
+/* falcona0,falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+
+/*
+ * FR_AZ_FATAL_INTR_REG_CHAR(128bit):
+ * Fatal interrupt register for Char
+ */
+#define FR_AZ_FATAL_INTR_REG_CHAR_OFST 0x00000240
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define FRF_AB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define FRF_AZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define FRF_AZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define FRF_AZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define FRF_AZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define FRF_AZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define FRF_AZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define FRF_AZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define FRF_AZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define FRF_AZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define FRF_AB_PCI_BUSERR_INT_CHAR_LBN 11
+#define FRF_AB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRAM_OOB_INT_CHAR_LBN 10
+#define FRF_AZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define FRF_AZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define FRF_AZ_MEM_PERR_INT_CHAR_LBN 8
+#define FRF_AZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define FRF_AZ_RBUF_OWN_INT_CHAR_LBN 7
+#define FRF_AZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TBUF_OWN_INT_CHAR_LBN 6
+#define FRF_AZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define FRF_AZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define FRF_AZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVQ_OWN_INT_CHAR_LBN 3
+#define FRF_AZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define FRF_AZ_EVF_OFLO_INT_CHAR_LBN 2
+#define FRF_AZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_LBN 1
+#define FRF_AZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define FRF_AZ_SRM_PERR_INT_CHAR_LBN 0
+#define FRF_AZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+
+/*
+ * FR_AZ_DP_CTRL_REG(128bit):
+ * Datapath control register
+ */
+#define FR_AZ_DP_CTRL_REG_OFST 0x00000250
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_FLS_EVQ_ID_LBN 0
+#define FRF_AZ_FLS_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_MEM_STAT_REG(128bit):
+ * Memory status register
+ */
+#define FR_AZ_MEM_STAT_REG_OFST 0x00000260
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MEM_PERR_VEC_LBN 53
+#define FRF_AB_MEM_PERR_VEC_WIDTH 40
+#define FRF_AB_MEM_PERR_VEC_DW0_LBN 53
+#define FRF_AB_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_AB_MEM_PERR_VEC_DW1_LBN 85
+#define FRF_AB_MEM_PERR_VEC_DW1_WIDTH 6
+#define FRF_AB_MBIST_CORR_LBN 38
+#define FRF_AB_MBIST_CORR_WIDTH 15
+#define FRF_AB_MBIST_ERR_LBN 0
+#define FRF_AB_MBIST_ERR_WIDTH 40
+#define FRF_AB_MBIST_ERR_DW0_LBN 0
+#define FRF_AB_MBIST_ERR_DW0_WIDTH 32
+#define FRF_AB_MBIST_ERR_DW1_LBN 32
+#define FRF_AB_MBIST_ERR_DW1_WIDTH 6
+#define FRF_CZ_MEM_PERR_VEC_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_WIDTH 35
+#define FRF_CZ_MEM_PERR_VEC_DW0_LBN 0
+#define FRF_CZ_MEM_PERR_VEC_DW0_WIDTH 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_LBN 32
+#define FRF_CZ_MEM_PERR_VEC_DW1_WIDTH 3
+
+
+/*
+ * FR_AZ_CS_DEBUG_REG(128bit):
+ * Debug register
+ */
+#define FR_AZ_CS_DEBUG_REG_OFST 0x00000270
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define FRF_CZ_CS_PORT_NUM_LBN 40
+#define FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_RESERVED_LBN 36
+#define FRF_CZ_CS_RESERVED_WIDTH 4
+#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define FRF_CZ_CS_PORT_FPE_DW0_LBN 1
+#define FRF_CZ_CS_PORT_FPE_DW0_WIDTH 32
+#define FRF_CZ_CS_PORT_FPE_DW1_LBN 33
+#define FRF_CZ_CS_PORT_FPE_DW1_WIDTH 3
+#define FRF_CZ_CS_PORT_FPE_LBN 1
+#define FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define FRF_AZ_CS_DEBUG_EN_LBN 0
+#define FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_DRIVER_REG(128bit):
+ * Driver scratch register [0-7]
+ */
+#define FR_AZ_DRIVER_REG_OFST 0x00000280
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_DRIVER_REG_STEP 16
+#define FR_AZ_DRIVER_REG_ROWS 8
+
+#define FRF_AZ_DRIVER_DW0_LBN 0
+#define FRF_AZ_DRIVER_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_ALTERA_BUILD_REG(128bit):
+ * Altera build register
+ */
+#define FR_AZ_ALTERA_BUILD_REG_OFST 0x00000300
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+
+/*
+ * FR_AZ_CSR_SPARE_REG(128bit):
+ * Spare register
+ */
+#define FR_AZ_CSR_SPARE_REG_OFST 0x00000310
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_LBN 72
+#define FRF_AZ_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define FRF_AZ_MEM_PERR_EN_LBN 64
+#define FRF_AZ_MEM_PERR_EN_WIDTH 38
+#define FRF_AZ_MEM_PERR_EN_DW0_LBN 64
+#define FRF_AZ_MEM_PERR_EN_DW0_WIDTH 32
+#define FRF_AZ_MEM_PERR_EN_DW1_LBN 96
+#define FRF_AZ_MEM_PERR_EN_DW1_WIDTH 6
+#define FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+
+/*
+ * FR_BZ_DEBUG_DATA_OUT_REG(128bit):
+ * Live Debug and Debug 2 out ports
+ */
+#define FR_BZ_DEBUG_DATA_OUT_REG_OFST 0x00000350
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_DEBUG2_PORT_LBN 25
+#define FRF_BZ_DEBUG2_PORT_WIDTH 15
+#define FRF_BZ_DEBUG1_PORT_LBN 0
+#define FRF_BZ_DEBUG1_PORT_WIDTH 25
+
+
+/*
+ * FR_BZ_EVQ_RPTR_REGP0(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BZ_EVQ_RPTR_REGP0_OFST 0x00000400
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_EVQ_RPTR_REGP0_STEP 8192
+#define FR_BZ_EVQ_RPTR_REGP0_ROWS 1024
+/*
+ * FR_AA_EVQ_RPTR_REG_KER(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AA_EVQ_RPTR_REG_KER_OFST 0x00011b00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_RPTR_REG_KER_STEP 4
+#define FR_AA_EVQ_RPTR_REG_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_RPTR_REG(32bit):
+ * Event queue read pointer register
+ */
+#define FR_AZ_EVQ_RPTR_REG_OFST 0x00fa0000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_RPTR_REG_STEP 16
+#define FR_AB_EVQ_RPTR_REG_ROWS 4096
+#define FR_CZ_EVQ_RPTR_REG_ROWS 1024
+/*
+ * FR_BB_EVQ_RPTR_REGP123(32bit):
+ * Event queue read pointer register
+ */
+#define FR_BB_EVQ_RPTR_REGP123_OFST 0x01000400
+/* falconb0=net_func_bar2 */
+#define FR_BB_EVQ_RPTR_REGP123_STEP 8192
+#define FR_BB_EVQ_RPTR_REGP123_ROWS 3072
+
+#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define FRF_AZ_EVQ_RPTR_LBN 0
+#define FRF_AZ_EVQ_RPTR_WIDTH 15
+
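Several registers here are arrays rather than single locations: _OFST gives the base offset, _STEP the byte stride between consecutive entries, and _ROWS the number of entries, so entry n would be expected at OFST + n * STEP. A minimal sketch under that assumption, using a hypothetical helper name:

#include <assert.h>
#include <stdint.h>

/* Offset of the per-queue event queue read pointer register. */
static inline uint32_t
evq_rptr_offset(unsigned int queue)
{
	assert(queue < FR_BZ_EVQ_RPTR_REGP0_ROWS);
	return FR_BZ_EVQ_RPTR_REGP0_OFST +
	       queue * FR_BZ_EVQ_RPTR_REGP0_STEP;
}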
+
+/*
+ * FR_BZ_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_BZ_TIMER_COMMAND_REGP0_OFST 0x00000420
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_BZ_TIMER_COMMAND_REGP0_ROWS 1024
+/*
+ * FR_AA_TIMER_COMMAND_REG_KER(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REG_KER_OFST 0x00000420
+/* falcona0=net_func_bar2 */
+#define FR_AA_TIMER_COMMAND_REG_KER_STEP 8192
+#define FR_AA_TIMER_COMMAND_REG_KER_ROWS 4
+/*
+ * FR_AB_TIMER_COMMAND_REGP123(128bit):
+ * Timer Command Registers
+ */
+#define FR_AB_TIMER_COMMAND_REGP123_OFST 0x01000420
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TIMER_COMMAND_REGP123_STEP 8192
+#define FR_AB_TIMER_COMMAND_REGP123_ROWS 3072
+/*
+ * FR_AA_TIMER_COMMAND_REGP0(128bit):
+ * Timer Command Registers
+ */
+#define FR_AA_TIMER_COMMAND_REGP0_OFST 0x00008420
+/* falcona0=char_func_bar0 */
+#define FR_AA_TIMER_COMMAND_REGP0_STEP 8192
+#define FR_AA_TIMER_COMMAND_REGP0_ROWS 1020
+
+#define FRF_CZ_TC_TIMER_MODE_LBN 14
+#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define FRF_AB_TC_TIMER_MODE_LBN 12
+#define FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define FRF_CZ_TC_TIMER_VAL_LBN 0
+#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define FRF_AB_TC_TIMER_VAL_LBN 0
+#define FRF_AB_TC_TIMER_VAL_WIDTH 12
+
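Writable fields can be packed the same way in reverse: mask each value to _WIDTH bits and shift it to its _LBN position. A minimal sketch for composing a Falcon A/B timer command word from the two fields defined above, with a hypothetical helper name and caller-supplied mode/tick values:

#include <stdint.h>

static inline uint32_t
tc_timer_command(uint32_t mode, uint32_t ticks)
{
	uint32_t value = 0;

	value |= (mode & ((1U << FRF_AB_TC_TIMER_MODE_WIDTH) - 1))
		  << FRF_AB_TC_TIMER_MODE_LBN;
	value |= (ticks & ((1U << FRF_AB_TC_TIMER_VAL_WIDTH) - 1))
		  << FRF_AB_TC_TIMER_VAL_LBN;
	return value;
}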
+
+/*
+ * FR_AZ_DRV_EV_REG(128bit):
+ * Driver generated event register
+ */
+#define FR_AZ_DRV_EV_REG_OFST 0x00000440
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_DRV_EV_QID_LBN 64
+#define FRF_AZ_DRV_EV_QID_WIDTH 12
+#define FRF_AZ_DRV_EV_DATA_LBN 0
+#define FRF_AZ_DRV_EV_DATA_WIDTH 64
+#define FRF_AZ_DRV_EV_DATA_DW0_LBN 0
+#define FRF_AZ_DRV_EV_DATA_DW0_WIDTH 32
+#define FRF_AZ_DRV_EV_DATA_DW1_LBN 32
+#define FRF_AZ_DRV_EV_DATA_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_EVQ_CTL_REG(128bit):
+ * Event queue control register
+ */
+#define FR_AZ_EVQ_CTL_REG_OFST 0x00000450
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+
+/*
+ * FR_AZ_EVQ_CNT1_REG(128bit):
+ * Event counter 1 register
+ */
+#define FR_AZ_EVQ_CNT1_REG_OFST 0x00000460
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_AZ_EVQ_CNT2_REG(128bit):
+ * Event counter 2 register
+ */
+#define FR_AZ_EVQ_CNT2_REG_OFST 0x00000470
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+
+/*
+ * FR_CZ_USR_EV_REG(32bit):
+ * Event mailbox register
+ */
+#define FR_CZ_USR_EV_REG_OFST 0x00000540
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_USR_EV_REG_STEP 8192
+#define FR_CZ_USR_EV_REG_ROWS 1024
+
+#define FRF_CZ_USR_EV_DATA_LBN 0
+#define FRF_CZ_USR_EV_DATA_WIDTH 32
+
+
+/*
+ * FR_AZ_BUF_TBL_CFG_REG(128bit):
+ * Buffer table configuration register
+ */
+#define FR_AZ_BUF_TBL_CFG_REG_OFST 0x00000600
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_TBL_MODE_LBN 3
+#define FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+
+/*
+ * FR_AZ_SRM_RX_DC_CFG_REG(128bit):
+ * SRAM receive descriptor cache configuration register
+ */
+#define FR_AZ_SRM_RX_DC_CFG_REG_OFST 0x00000610
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_TX_DC_CFG_REG(128bit):
+ * SRAM transmit descriptor cache configuration register
+ */
+#define FR_AZ_SRM_TX_DC_CFG_REG_OFST 0x00000620
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+
+/*
+ * FR_AZ_SRM_CFG_REG_SF(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_SF_OFST 0x00000380
+/* falcona0,falconb0=eeprom_flash */
+/*
+ * FR_AZ_SRM_CFG_REG(128bit):
+ * SRAM configuration register
+ */
+#define FR_AZ_SRM_CFG_REG_OFST 0x00000630
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define FRF_AZ_SRM_INIT_EN_LBN 3
+#define FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define FRF_AZ_SRM_NUM_BANK_LBN 2
+#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+
+/*
+ * FR_AZ_BUF_TBL_UPD_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_BUF_TBL_UPD_REG_OFST 0x00000650
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_BUF_UPD_CMD_LBN 63
+#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_CMD_LBN 62
+#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+
+/*
+ * FR_AZ_SRM_UPD_EVQ_REG(128bit):
+ * Buffer table update register
+ */
+#define FR_AZ_SRM_UPD_EVQ_REG_OFST 0x00000660
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_SRAM_PARITY_REG(128bit):
+ * SRAM parity register.
+ */
+#define FR_AZ_SRAM_PARITY_REG_OFST 0x00000670
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_BYPASS_ECC_LBN 3
+#define FRF_CZ_BYPASS_ECC_WIDTH 1
+#define FRF_CZ_SEC_INT_LBN 2
+#define FRF_CZ_SEC_INT_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+#define FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_CFG_REG(128bit):
+ * Receive configuration register
+ */
+#define FR_AZ_RX_CFG_REG_OFST 0x00000800
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define FRF_BZ_RX_TCP_SUP_LBN 48
+#define FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define FRF_BZ_RX_INGR_EN_LBN 47
+#define FRF_BZ_RX_INGR_EN_WIDTH 1
+#define FRF_BZ_RX_IP_HASH_LBN 46
+#define FRF_BZ_RX_IP_HASH_WIDTH 1
+#define FRF_BZ_RX_HASH_ALG_LBN 45
+#define FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define FRF_BZ_RX_XON_TX_TH_LBN 33
+#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_OWNERR_CTL_LBN 30
+#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_RX_XON_TX_TH_LBN 25
+#define FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XON_MAC_TH_LBN 6
+#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_RX_FILTER_CTL_REG(128bit):
+ * Receive filter control registers
+ */
+#define FR_AZ_RX_FILTER_CTL_REG_OFST 0x00000810
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define FRF_AZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_NUM_KER_LBN 24
+#define FRF_AZ_NUM_KER_WIDTH 2
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define FRF_AZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define FRF_AZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define FRF_AZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+
+/*
+ * FR_AZ_RX_FLUSH_DESCQ_REG(128bit):
+ * Receive flush descriptor queue register
+ */
+#define FR_AZ_RX_FLUSH_DESCQ_REG_OFST 0x00000820
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_BZ_RX_DESC_UPD_REGP0_OFST 0x00000830
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_RX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_RX_DESC_UPD_REG_KER(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REG_KER_OFST 0x00000830
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_RX_DESC_UPD_REG_KER_ROWS 4
+/*
+ * FR_AB_RX_DESC_UPD_REGP123(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AB_RX_DESC_UPD_REGP123_OFST 0x01000830
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_RX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_RX_DESC_UPD_REGP0(128bit):
+ * Receive descriptor update register.
+ */
+#define FR_AA_RX_DESC_UPD_REGP0_OFST 0x00008830
+/* falcona0=char_func_bar0 */
+#define FR_AA_RX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_RX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_RX_DESC_WPTR_LBN 96
+#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_RX_DESC_LBN 0
+#define FRF_AZ_RX_DESC_WIDTH 64
+#define FRF_AZ_RX_DESC_DW0_LBN 0
+#define FRF_AZ_RX_DESC_DW0_WIDTH 32
+#define FRF_AZ_RX_DESC_DW1_LBN 32
+#define FRF_AZ_RX_DESC_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_DC_CFG_REG(128bit):
+ * Receive descriptor cache configuration register
+ */
+#define FR_AZ_RX_DC_CFG_REG_OFST 0x00000840
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_MAX_PF_LBN 2
+#define FRF_AZ_RX_MAX_PF_WIDTH 2
+#define FRF_AZ_RX_DC_SIZE_LBN 0
+#define FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define FFE_AZ_RX_DC_SIZE_64 3
+#define FFE_AZ_RX_DC_SIZE_32 2
+#define FFE_AZ_RX_DC_SIZE_16 1
+#define FFE_AZ_RX_DC_SIZE_8 0
+
+
+/*
+ * FR_AZ_RX_DC_PF_WM_REG(128bit):
+ * Receive descriptor cache pre-fetch watermark register
+ */
+#define FR_AZ_RX_DC_PF_WM_REG_OFST 0x00000850
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+
+/*
+ * FR_BZ_RX_RSS_TKEY_REG(128bit):
+ * RSS Toeplitz hash key
+ */
+#define FR_BZ_RX_RSS_TKEY_REG_OFST 0x00000860
+/* falconb0,sienaa0=net_func_bar2 */
+
+#define FRF_BZ_RX_RSS_TKEY_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW3_LBN 96
+#define FRF_BZ_RX_RSS_TKEY_DW3_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW2_LBN 64
+#define FRF_BZ_RX_RSS_TKEY_DW2_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_LBN 32
+#define FRF_BZ_RX_RSS_TKEY_DW1_WIDTH 32
+#define FRF_BZ_RX_RSS_TKEY_DW0_LBN 0
+#define FRF_BZ_RX_RSS_TKEY_DW0_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_NODESC_DROP_REG(128bit):
+ * Receive dropped packet counter register
+ */
+#define FR_AZ_RX_NODESC_DROP_REG_OFST 0x00000880
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_NODESC_DROP_CNT_LBN 0
+#define FRF_AZ_RX_NODESC_DROP_CNT_WIDTH 16
+
+
+/*
+ * FR_AZ_RX_SELF_RST_REG(128bit):
+ * Receive self reset register
+ */
+#define FR_AZ_RX_SELF_RST_REG_OFST 0x00000890
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_ISCSI_DIS_LBN 17
+#define FRF_AZ_RX_ISCSI_DIS_WIDTH 1
+#define FRF_AB_RX_SW_RST_REG_LBN 16
+#define FRF_AB_RX_SW_RST_REG_WIDTH 1
+#define FRF_AB_RX_SELF_RST_EN_LBN 8
+#define FRF_AB_RX_SELF_RST_EN_WIDTH 1
+#define FRF_AZ_RX_MAX_PF_LAT_LBN 4
+#define FRF_AZ_RX_MAX_PF_LAT_WIDTH 4
+#define FRF_AZ_RX_MAX_LU_LAT_LBN 0
+#define FRF_AZ_RX_MAX_LU_LAT_WIDTH 4
+
+
+/*
+ * FR_AZ_RX_DEBUG_REG(128bit):
+ * undocumented register
+ */
+#define FR_AZ_RX_DEBUG_REG_OFST 0x000008a0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_DEBUG_LBN 0
+#define FRF_AZ_RX_DEBUG_WIDTH 64
+#define FRF_AZ_RX_DEBUG_DW0_LBN 0
+#define FRF_AZ_RX_DEBUG_DW0_WIDTH 32
+#define FRF_AZ_RX_DEBUG_DW1_LBN 32
+#define FRF_AZ_RX_DEBUG_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_RX_PUSH_DROP_REG(128bit):
+ * Receive descriptor push dropped counter register
+ */
+#define FR_AZ_RX_PUSH_DROP_REG_OFST 0x000008b0
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG1(128bit):
+ * IPv6 RSS Toeplitz hash key low bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG1_OFST 0x000008d0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG2(128bit):
+ * IPv6 RSS Toeplitz hash key middle bytes
+ */
+#define FR_CZ_RX_RSS_IPV6_REG2_OFST 0x000008e0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW1_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW2_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_LBN 96
+#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_DW3_WIDTH 32
+
+
+/*
+ * FR_CZ_RX_RSS_IPV6_REG3(128bit):
+ * IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings
+ */
+#define FR_CZ_RX_RSS_IPV6_REG3_OFST 0x000008f0
+/* sienaa0=net_func_bar2 */
+
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_LBN 0
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW0_WIDTH 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_LBN 32
+#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_DW1_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_FLUSH_DESCQ_REG(128bit):
+ * Transmit flush descriptor queue register
+ */
+#define FR_AZ_TX_FLUSH_DESCQ_REG_OFST 0x00000a00
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_BZ_TX_DESC_UPD_REGP0_OFST 0x00000a10
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_BZ_TX_DESC_UPD_REGP0_ROWS 1024
+/*
+ * FR_AA_TX_DESC_UPD_REG_KER(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REG_KER_OFST 0x00000a10
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_UPD_REG_KER_STEP 8192
+#define FR_AA_TX_DESC_UPD_REG_KER_ROWS 8
+/*
+ * FR_AB_TX_DESC_UPD_REGP123(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AB_TX_DESC_UPD_REGP123_OFST 0x01000a10
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_DESC_UPD_REGP123_STEP 8192
+#define FR_AB_TX_DESC_UPD_REGP123_ROWS 3072
+/*
+ * FR_AA_TX_DESC_UPD_REGP0(128bit):
+ * Transmit descriptor update register.
+ */
+#define FR_AA_TX_DESC_UPD_REGP0_OFST 0x00008a10
+/* falcona0=char_func_bar0 */
+#define FR_AA_TX_DESC_UPD_REGP0_STEP 8192
+#define FR_AA_TX_DESC_UPD_REGP0_ROWS 1020
+
+#define FRF_AZ_TX_DESC_WPTR_LBN 96
+#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define FRF_AZ_TX_DESC_LBN 0
+#define FRF_AZ_TX_DESC_WIDTH 95
+#define FRF_AZ_TX_DESC_DW0_LBN 0
+#define FRF_AZ_TX_DESC_DW0_WIDTH 32
+#define FRF_AZ_TX_DESC_DW1_LBN 32
+#define FRF_AZ_TX_DESC_DW1_WIDTH 32
+#define FRF_AZ_TX_DESC_DW2_LBN 64
+#define FRF_AZ_TX_DESC_DW2_WIDTH 31
+
+
+/*
+ * FR_AZ_TX_DC_CFG_REG(128bit):
+ * Transmit descriptor cache configuration register
+ */
+#define FR_AZ_TX_DC_CFG_REG_OFST 0x00000a20
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_DC_SIZE_LBN 0
+#define FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define FFE_AZ_TX_DC_SIZE_32 2
+#define FFE_AZ_TX_DC_SIZE_16 1
+#define FFE_AZ_TX_DC_SIZE_8 0
+
+
+/*
+ * FR_AA_TX_CHKSM_CFG_REG(128bit):
+ * Transmit checksum configuration register
+ */
+#define FR_AA_TX_CHKSM_CFG_REG_OFST 0x00000a30
+/* falcona0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_CFG_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_CFG_REG_OFST 0x00000a50
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+
+/*
+ * FR_AZ_TX_PUSH_DROP_REG(128bit):
+ * Transmit push dropped register
+ */
+#define FR_AZ_TX_PUSH_DROP_REG_OFST 0x00000a60
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+
+/*
+ * FR_AZ_TX_RESERVED_REG(128bit):
+ * Transmit configuration register
+ */
+#define FR_AZ_TX_RESERVED_REG_OFST 0x00000a80
+/* falcona0,falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_EVT_CNT_LBN 121
+#define FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define FRF_AZ_TX_PUSH_EN_LBN 89
+#define FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define FRF_AZ_TX_DMAQ_ST_LBN 78
+#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_LBN 64
+#define FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define FRF_AZ_TX_XP_TIMER_LBN 52
+#define FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define FRF_AZ_TX_PREF_SPACER_LBN 44
+#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define FRF_AZ_TX_ONLY1TAG_LBN 21
+#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define FRF_AA_TX_DMA_FF_THR_LBN 16
+#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define FRF_AZ_TX_DMA_SPACER_LBN 8
+#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define FRF_AA_TX_TCP_DIS_LBN 7
+#define FRF_AA_TX_TCP_DIS_WIDTH 1
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define FRF_AA_TX_IP_DIS_LBN 6
+#define FRF_AA_TX_IP_DIS_WIDTH 1
+#define FRF_AZ_TX_MAX_CPL_LBN 2
+#define FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define FFE_AZ_TX_MAX_CPL_16 3
+#define FFE_AZ_TX_MAX_CPL_8 2
+#define FFE_AZ_TX_MAX_CPL_4 1
+#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define FRF_AZ_TX_MAX_PREF_LBN 0
+#define FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define FFE_AZ_TX_MAX_PREF_32 3
+#define FFE_AZ_TX_MAX_PREF_16 2
+#define FFE_AZ_TX_MAX_PREF_8 1
+#define FFE_AZ_TX_MAX_PREF_OFF 0
+
+
+/*
+ * FR_BZ_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_BZ_TX_PACE_REG_OFST 0x00000a90
+/* falconb0,sienaa0=net_func_bar2 */
+/*
+ * FR_AA_TX_PACE_REG(128bit):
+ * Transmit pace control register
+ */
+#define FR_AA_TX_PACE_REG_OFST 0x00f80000
+/* falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_SB_NOT_AF_LBN 19
+#define FRF_AZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_SB_AF_LBN 9
+#define FRF_AZ_TX_PACE_SB_AF_WIDTH 10
+#define FRF_AZ_TX_PACE_FB_BASE_LBN 5
+#define FRF_AZ_TX_PACE_FB_BASE_WIDTH 4
+#define FRF_AZ_TX_PACE_BIN_TH_LBN 0
+#define FRF_AZ_TX_PACE_BIN_TH_WIDTH 5
+
+
+/*
+ * FR_AZ_TX_PACE_DROP_QID_REG(128bit):
+ * PACE Drop QID Counter
+ */
+#define FR_AZ_TX_PACE_DROP_QID_REG_OFST 0x00000aa0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define FRF_AZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_TX_VLAN_REG(128bit):
+ * Transmit VLAN tag register
+ */
+#define FR_AB_TX_VLAN_REG_OFST 0x00000ae0
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_TX_VLAN_EN_LBN 127
+#define FRF_AB_TX_VLAN_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT1_EN_LBN 125
+#define FRF_AB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_PORT0_EN_LBN 124
+#define FRF_AB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN7_LBN 112
+#define FRF_AB_TX_VLAN7_WIDTH 12
+#define FRF_AB_TX_VLAN6_PORT1_EN_LBN 109
+#define FRF_AB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_PORT0_EN_LBN 108
+#define FRF_AB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN6_LBN 96
+#define FRF_AB_TX_VLAN6_WIDTH 12
+#define FRF_AB_TX_VLAN5_PORT1_EN_LBN 93
+#define FRF_AB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_PORT0_EN_LBN 92
+#define FRF_AB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN5_LBN 80
+#define FRF_AB_TX_VLAN5_WIDTH 12
+#define FRF_AB_TX_VLAN4_PORT1_EN_LBN 77
+#define FRF_AB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_PORT0_EN_LBN 76
+#define FRF_AB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN4_LBN 64
+#define FRF_AB_TX_VLAN4_WIDTH 12
+#define FRF_AB_TX_VLAN3_PORT1_EN_LBN 61
+#define FRF_AB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_PORT0_EN_LBN 60
+#define FRF_AB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN3_LBN 48
+#define FRF_AB_TX_VLAN3_WIDTH 12
+#define FRF_AB_TX_VLAN2_PORT1_EN_LBN 45
+#define FRF_AB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_PORT0_EN_LBN 44
+#define FRF_AB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN2_LBN 32
+#define FRF_AB_TX_VLAN2_WIDTH 12
+#define FRF_AB_TX_VLAN1_PORT1_EN_LBN 29
+#define FRF_AB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_PORT0_EN_LBN 28
+#define FRF_AB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN1_LBN 16
+#define FRF_AB_TX_VLAN1_WIDTH 12
+#define FRF_AB_TX_VLAN0_PORT1_EN_LBN 13
+#define FRF_AB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_PORT0_EN_LBN 12
+#define FRF_AB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define FRF_AB_TX_VLAN0_LBN 0
+#define FRF_AB_TX_VLAN0_WIDTH 12
+
+
+/*
+ * FR_AZ_TX_IPFIL_PORTEN_REG(128bit):
+ * Transmit filter control register
+ */
+#define FR_AZ_TX_IPFIL_PORTEN_REG_OFST 0x00000af0
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AZ_TX_MADR0_FIL_EN_LBN 64
+#define FRF_AZ_TX_MADR0_FIL_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL31_PORT_EN_LBN 62
+#define FRF_AB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL30_PORT_EN_LBN 60
+#define FRF_AB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL29_PORT_EN_LBN 58
+#define FRF_AB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL28_PORT_EN_LBN 56
+#define FRF_AB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL27_PORT_EN_LBN 54
+#define FRF_AB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL26_PORT_EN_LBN 52
+#define FRF_AB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL25_PORT_EN_LBN 50
+#define FRF_AB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL24_PORT_EN_LBN 48
+#define FRF_AB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL23_PORT_EN_LBN 46
+#define FRF_AB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL22_PORT_EN_LBN 44
+#define FRF_AB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL21_PORT_EN_LBN 42
+#define FRF_AB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL20_PORT_EN_LBN 40
+#define FRF_AB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL19_PORT_EN_LBN 38
+#define FRF_AB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL18_PORT_EN_LBN 36
+#define FRF_AB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL17_PORT_EN_LBN 34
+#define FRF_AB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL16_PORT_EN_LBN 32
+#define FRF_AB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL15_PORT_EN_LBN 30
+#define FRF_AB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL14_PORT_EN_LBN 28
+#define FRF_AB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL13_PORT_EN_LBN 26
+#define FRF_AB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL12_PORT_EN_LBN 24
+#define FRF_AB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL11_PORT_EN_LBN 22
+#define FRF_AB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL10_PORT_EN_LBN 20
+#define FRF_AB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL9_PORT_EN_LBN 18
+#define FRF_AB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL8_PORT_EN_LBN 16
+#define FRF_AB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL7_PORT_EN_LBN 14
+#define FRF_AB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL6_PORT_EN_LBN 12
+#define FRF_AB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL5_PORT_EN_LBN 10
+#define FRF_AB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL4_PORT_EN_LBN 8
+#define FRF_AB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL3_PORT_EN_LBN 6
+#define FRF_AB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL2_PORT_EN_LBN 4
+#define FRF_AB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL1_PORT_EN_LBN 2
+#define FRF_AB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define FRF_AB_TX_IPFIL0_PORT_EN_LBN 0
+#define FRF_AB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+
+/*
+ * FR_AB_TX_IPFIL_TBL(128bit):
+ * Transmit IP source address filter table
+ */
+#define FR_AB_TX_IPFIL_TBL_OFST 0x00000b00
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_TX_IPFIL_TBL_STEP 16
+#define FR_AB_TX_IPFIL_TBL_ROWS 16
+
+#define FRF_AB_TX_IPFIL_MASK_1_LBN 96
+#define FRF_AB_TX_IPFIL_MASK_1_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_1_LBN 64
+#define FRF_AB_TX_IP_SRC_ADR_1_WIDTH 32
+#define FRF_AB_TX_IPFIL_MASK_0_LBN 32
+#define FRF_AB_TX_IPFIL_MASK_0_WIDTH 32
+#define FRF_AB_TX_IP_SRC_ADR_0_LBN 0
+#define FRF_AB_TX_IP_SRC_ADR_0_WIDTH 32
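+
+/*
+ * Usage sketch (illustrative only): registers declared with *_OFST, *_STEP
+ * and *_ROWS form a table with one entry per row.  The hypothetical helper
+ * below computes the byte offset of a given row; callers are assumed to
+ * have checked the index against the corresponding *_ROWS value.
+ */
+static inline unsigned int
+example_table_entry_ofst(unsigned int base_ofst, unsigned int step,
+                         unsigned int index)
+{
+	/* Row n starts 'n * step' bytes after the table base. */
+	return base_ofst + index * step;
+}
+
+/*
+ * For example, row 3 of the TX IP filter table would be at:
+ *   example_table_entry_ofst(FR_AB_TX_IPFIL_TBL_OFST,
+ *                            FR_AB_TX_IPFIL_TBL_STEP, 3);
+ */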
+
+
+/*
+ * FR_AB_MD_TXD_REG(128bit):
+ * PHY management transmit data register
+ */
+#define FR_AB_MD_TXD_REG_OFST 0x00000c00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_TXD_LBN 0
+#define FRF_AB_MD_TXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_RXD_REG(128bit):
+ * PHY management receive data register
+ */
+#define FR_AB_MD_RXD_REG_OFST 0x00000c10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RXD_LBN 0
+#define FRF_AB_MD_RXD_WIDTH 16
+
+
+/*
+ * FR_AB_MD_CS_REG(128bit):
+ * PHY management configuration & status register
+ */
+#define FR_AB_MD_CS_REG_OFST 0x00000c20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_RD_EN_LBN 15
+#define FRF_AB_MD_RD_EN_WIDTH 1
+#define FRF_AB_MD_WR_EN_LBN 14
+#define FRF_AB_MD_WR_EN_WIDTH 1
+#define FRF_AB_MD_ADDR_CMD_LBN 13
+#define FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define FRF_AB_MD_PT_LBN 7
+#define FRF_AB_MD_PT_WIDTH 3
+#define FRF_AB_MD_PL_LBN 6
+#define FRF_AB_MD_PL_WIDTH 1
+#define FRF_AB_MD_INT_CLR_LBN 5
+#define FRF_AB_MD_INT_CLR_WIDTH 1
+#define FRF_AB_MD_GC_LBN 4
+#define FRF_AB_MD_GC_WIDTH 1
+#define FRF_AB_MD_PRSP_LBN 3
+#define FRF_AB_MD_PRSP_WIDTH 1
+#define FRF_AB_MD_RIC_LBN 2
+#define FRF_AB_MD_RIC_WIDTH 1
+#define FRF_AB_MD_RDC_LBN 1
+#define FRF_AB_MD_RDC_WIDTH 1
+#define FRF_AB_MD_WRC_LBN 0
+#define FRF_AB_MD_WRC_WIDTH 1
+
+
+/*
+ * FR_AB_MD_PHY_ADR_REG(128bit):
+ * PHY management PHY address register
+ */
+#define FR_AB_MD_PHY_ADR_REG_OFST 0x00000c30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PHY_ADR_LBN 0
+#define FRF_AB_MD_PHY_ADR_WIDTH 16
+
+
+/*
+ * FR_AB_MD_ID_REG(128bit):
+ * PHY management ID register
+ */
+#define FR_AB_MD_ID_REG_OFST 0x00000c40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PRT_ADR_LBN 11
+#define FRF_AB_MD_PRT_ADR_WIDTH 5
+#define FRF_AB_MD_DEV_ADR_LBN 6
+#define FRF_AB_MD_DEV_ADR_WIDTH 5
+
+
+/*
+ * FR_AB_MD_STAT_REG(128bit):
+ * PHY management status & mask register
+ */
+#define FR_AB_MD_STAT_REG_OFST 0x00000c50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MD_PINT_LBN 4
+#define FRF_AB_MD_PINT_WIDTH 1
+#define FRF_AB_MD_DONE_LBN 3
+#define FRF_AB_MD_DONE_WIDTH 1
+#define FRF_AB_MD_BSERR_LBN 2
+#define FRF_AB_MD_BSERR_WIDTH 1
+#define FRF_AB_MD_LNFL_LBN 1
+#define FRF_AB_MD_LNFL_WIDTH 1
+#define FRF_AB_MD_BSY_LBN 0
+#define FRF_AB_MD_BSY_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_STAT_DMA_REG(128bit):
+ * Port MAC statistical counter DMA register
+ */
+#define FR_AB_MAC_STAT_DMA_REG_OFST 0x00000c60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_LBN 0
+#define FRF_AB_MAC_STAT_DMA_ADR_DW0_WIDTH 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_LBN 32
+#define FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH 16
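+
+/*
+ * Usage sketch (illustrative only): fields wider than 32 bits, such as
+ * FRF_AB_MAC_STAT_DMA_ADR above, are also described by *_DW0/*_DW1
+ * sub-fields so that 32-bit accessors can handle them.  A hypothetical
+ * helper splitting a 48-bit statistics DMA address into its two dwords:
+ */
+static inline void
+example_split_stat_dma_adr(uint64_t adr, uint32_t *dw0, uint32_t *dw1)
+{
+	/* DW0 carries bits [31:0] of the address. */
+	*dw0 = (uint32_t)(adr & 0xffffffffUL);
+	/* DW1 carries the remaining high-order bits (16 of them here). */
+	*dw1 = (uint32_t)(adr >> 32) &
+	       ((1U << FRF_AB_MAC_STAT_DMA_ADR_DW1_WIDTH) - 1);
+}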
+
+
+/*
+ * FR_AB_MAC_CTRL_REG(128bit):
+ * Port MAC control register
+ */
+#define FR_AB_MAC_CTRL_REG_OFST 0x00000c80
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_XOFF_VAL_LBN 16
+#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define FRF_AB_MAC_UC_PROM_LBN 3
+#define FRF_AB_MAC_UC_PROM_WIDTH 1
+#define FRF_AB_MAC_LINK_STATUS_LBN 2
+#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define FRF_AB_MAC_SPEED_LBN 0
+#define FRF_AB_MAC_SPEED_WIDTH 2
+#define FRF_AB_MAC_SPEED_10M 0
+#define FRF_AB_MAC_SPEED_100M 1
+#define FRF_AB_MAC_SPEED_1G 2
+#define FRF_AB_MAC_SPEED_10G 3
+
+/*
+ * FR_BB_GEN_MODE_REG(128bit):
+ * General Purpose mode register (external interrupt mask)
+ */
+#define FR_BB_GEN_MODE_REG_OFST 0x00000c90
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG0(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH0_REG_OFST 0x00000ca0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH0_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH0_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH0_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH0_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH0_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH0_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_MAC_MC_HASH_REG1(128bit):
+ * Multicast address hash table
+ */
+#define FR_AB_MAC_MC_HASH1_REG_OFST 0x00000cb0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+#define FRF_AB_MAC_MCAST_HASH1_DW0_LBN 0
+#define FRF_AB_MAC_MCAST_HASH1_DW0_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_LBN 32
+#define FRF_AB_MAC_MCAST_HASH1_DW1_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW2_LBN 64
+#define FRF_AB_MAC_MCAST_HASH1_DW2_WIDTH 32
+#define FRF_AB_MAC_MCAST_HASH1_DW3_LBN 96
+#define FRF_AB_MAC_MCAST_HASH1_DW3_WIDTH 32
+
+
+/*
+ * FR_AB_GM_CFG1_REG(32bit):
+ * GMAC configuration register 1
+ */
+#define FR_AB_GM_CFG1_REG_OFST 0x00000e00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_SW_RST_LBN 31
+#define FRF_AB_GM_SW_RST_WIDTH 1
+#define FRF_AB_GM_SIM_RST_LBN 30
+#define FRF_AB_GM_SIM_RST_WIDTH 1
+#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define FRF_AB_GM_LOOP_LBN 8
+#define FRF_AB_GM_LOOP_WIDTH 1
+#define FRF_AB_GM_RX_FC_EN_LBN 5
+#define FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define FRF_AB_GM_TX_FC_EN_LBN 4
+#define FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_RXEN_LBN 3
+#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define FRF_AB_GM_RX_EN_LBN 2
+#define FRF_AB_GM_RX_EN_WIDTH 1
+#define FRF_AB_GM_SYNC_TXEN_LBN 1
+#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define FRF_AB_GM_TX_EN_LBN 0
+#define FRF_AB_GM_TX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_GM_CFG2_REG(32bit):
+ * GMAC configuration register 2
+ */
+#define FR_AB_GM_CFG2_REG_OFST 0x00000e10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_PAMBL_LEN_LBN 12
+#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define FRF_AB_GM_IF_MODE_LBN 8
+#define FRF_AB_GM_IF_MODE_WIDTH 2
+#define FRF_AB_GM_IF_MODE_BYTE_MODE 2
+#define FRF_AB_GM_IF_MODE_NIBBLE_MODE 1
+#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define FRF_AB_GM_LEN_CHK_LBN 4
+#define FRF_AB_GM_LEN_CHK_WIDTH 1
+#define FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define FRF_AB_GM_CRC_EN_LBN 1
+#define FRF_AB_GM_CRC_EN_WIDTH 1
+#define FRF_AB_GM_FD_LBN 0
+#define FRF_AB_GM_FD_WIDTH 1
+
+
+/*
+ * FR_AB_GM_IPG_REG(32bit):
+ * GMAC IPG register
+ */
+#define FR_AB_GM_IPG_REG_OFST 0x00000e20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define FRF_AB_GM_B2B_IPG_LBN 0
+#define FRF_AB_GM_B2B_IPG_WIDTH 7
+
+
+/*
+ * FR_AB_GM_HD_REG(32bit):
+ * GMAC half duplex register
+ */
+#define FR_AB_GM_HD_REG_OFST 0x00000e30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define FRF_AB_GM_DIS_BOFF_LBN 17
+#define FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define FRF_AB_GM_COL_WIN_LBN 0
+#define FRF_AB_GM_COL_WIN_WIDTH 10
+
+
+/*
+ * FR_AB_GM_MAX_FLEN_REG(32bit):
+ * GMAC maximum frame length register
+ */
+#define FR_AB_GM_MAX_FLEN_REG_OFST 0x00000e40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_FLEN_LBN 0
+#define FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+
+/*
+ * FR_AB_GM_TEST_REG(32bit):
+ * GMAC test register
+ */
+#define FR_AB_GM_TEST_REG_OFST 0x00000e70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_MAX_BOFF_LBN 3
+#define FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define FRF_AB_GM_TEST_PAUSE_LBN 1
+#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define FRF_AB_GM_SHORT_SLOT_LBN 0
+#define FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+
+/*
+ * FR_AB_GM_ADR1_REG(32bit):
+ * GMAC station address register 1
+ */
+#define FR_AB_GM_ADR1_REG_OFST 0x00000f00
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B0_LBN 24
+#define FRF_AB_GM_ADR_B0_WIDTH 8
+#define FRF_AB_GM_ADR_B1_LBN 16
+#define FRF_AB_GM_ADR_B1_WIDTH 8
+#define FRF_AB_GM_ADR_B2_LBN 8
+#define FRF_AB_GM_ADR_B2_WIDTH 8
+#define FRF_AB_GM_ADR_B3_LBN 0
+#define FRF_AB_GM_ADR_B3_WIDTH 8
+
+
+/*
+ * FR_AB_GM_ADR2_REG(32bit):
+ * GMAC station address register 2
+ */
+#define FR_AB_GM_ADR2_REG_OFST 0x00000f10
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GM_ADR_B4_LBN 24
+#define FRF_AB_GM_ADR_B4_WIDTH 8
+#define FRF_AB_GM_ADR_B5_LBN 16
+#define FRF_AB_GM_ADR_B5_WIDTH 8
+
+
+/*
+ * FR_AB_GMF_CFG0_REG(32bit):
+ * GMAC FIFO configuration register 0
+ */
+#define FR_AB_GMF_CFG0_REG_OFST 0x00000f20
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_FTFENRPLY_LBN 20
+#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define FRF_AB_GMF_STFENRPLY_LBN 19
+#define FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define FRF_AB_GMF_FRFENRPLY_LBN 18
+#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_SRFENRPLY_LBN 17
+#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define FRF_AB_GMF_WTMENRPLY_LBN 16
+#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define FRF_AB_GMF_FTFENREQ_LBN 12
+#define FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define FRF_AB_GMF_STFENREQ_LBN 11
+#define FRF_AB_GMF_STFENREQ_WIDTH 1
+#define FRF_AB_GMF_FRFENREQ_LBN 10
+#define FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define FRF_AB_GMF_SRFENREQ_LBN 9
+#define FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define FRF_AB_GMF_WTMENREQ_LBN 8
+#define FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFT_LBN 4
+#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define FRF_AB_GMF_HSTRSTST_LBN 3
+#define FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define FRF_AB_GMF_HSTRSTFR_LBN 2
+#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTSR_LBN 1
+#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define FRF_AB_GMF_HSTRSTWT_LBN 0
+#define FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+
+/*
+ * FR_AB_GMF_CFG1_REG(32bit):
+ * GMAC FIFO configuration register 1
+ */
+#define FR_AB_GMF_CFG1_REG_OFST 0x00000f30
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGFRTH_LBN 16
+#define FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+
+/*
+ * FR_AB_GMF_CFG2_REG(32bit):
+ * GMAC FIFO configuration register 2
+ */
+#define FR_AB_GMF_CFG2_REG_OFST 0x00000f40
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWM_LBN 16
+#define FRF_AB_GMF_CFGHWM_WIDTH 6
+#define FRF_AB_GMF_CFGLWM_LBN 0
+#define FRF_AB_GMF_CFGLWM_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG3_REG(32bit):
+ * GMAC FIFO configuration register 3
+ */
+#define FR_AB_GMF_CFG3_REG_OFST 0x00000f50
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHWMFT_LBN 16
+#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define FRF_AB_GMF_CFGFTTH_LBN 0
+#define FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+
+/*
+ * FR_AB_GMF_CFG4_REG(32bit):
+ * GMAC FIFO configuration register 4
+ */
+#define FR_AB_GMF_CFG4_REG_OFST 0x00000f60
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+
+/*
+ * FR_AB_GMF_CFG5_REG(32bit):
+ * GMAC FIFO configuration register 5
+ */
+#define FR_AB_GMF_CFG5_REG_OFST 0x00000f70
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_GMF_CFGHDPLX_LBN 22
+#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define FRF_AB_GMF_SRFULL_LBN 21
+#define FRF_AB_GMF_SRFULL_WIDTH 1
+#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+
+/*
+ * FR_BB_TX_SRC_MAC_TBL(128bit):
+ * Transmit MAC source address filter table
+ */
+#define FR_BB_TX_SRC_MAC_TBL_OFST 0x00001000
+/* falconb0=net_func_bar2 */
+#define FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
+
+#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_LBN 64
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_LBN 96
+#define FRF_BB_TX_SRC_MAC_ADR_1_DW1_WIDTH 16
+#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_LBN 0
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW0_WIDTH 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_LBN 32
+#define FRF_BB_TX_SRC_MAC_ADR_0_DW1_WIDTH 16
+
+
+/*
+ * FR_BB_TX_SRC_MAC_CTL_REG(128bit):
+ * Transmit MAC source address filter control
+ */
+#define FR_BB_TX_SRC_MAC_CTL_REG_OFST 0x00001100
+/* falconb0=net_func_bar2 */
+
+#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+
+/*
+ * FR_AB_XM_ADR_LO_REG(128bit):
+ * XGMAC address register low
+ */
+#define FR_AB_XM_ADR_LO_REG_OFST 0x00001200
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_LO_LBN 0
+#define FRF_AB_XM_ADR_LO_WIDTH 32
+
+
+/*
+ * FR_AB_XM_ADR_HI_REG(128bit):
+ * XGMAC address register high
+ */
+#define FR_AB_XM_ADR_HI_REG_OFST 0x00001210
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_ADR_HI_LBN 0
+#define FRF_AB_XM_ADR_HI_WIDTH 16
+
+
+/*
+ * FR_AB_XM_GLB_CFG_REG(128bit):
+ * XGMAC global configuration
+ */
+#define FR_AB_XM_GLB_CFG_REG_OFST 0x00001220
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define FRF_AB_XM_DEBUG_MODE_LBN 16
+#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define FRF_AB_XM_RX_STAT_EN_LBN 11
+#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_TX_STAT_EN_LBN 10
+#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_WAN_MODE_LBN 5
+#define FRF_AB_XM_WAN_MODE_WIDTH 1
+#define FRF_AB_XM_INTCLR_MODE_LBN 3
+#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define FRF_AB_XM_CORE_RST_LBN 0
+#define FRF_AB_XM_CORE_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_TX_CFG_REG(128bit):
+ * XGMAC transmit configuration
+ */
+#define FR_AB_XM_TX_CFG_REG_OFST 0x00001230
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PROG_LBN 24
+#define FRF_AB_XM_TX_PROG_WIDTH 1
+#define FRF_AB_XM_IPG_LBN 16
+#define FRF_AB_XM_IPG_WIDTH 4
+#define FRF_AB_XM_FCNTL_LBN 10
+#define FRF_AB_XM_FCNTL_WIDTH 1
+#define FRF_AB_XM_TXCRC_LBN 8
+#define FRF_AB_XM_TXCRC_WIDTH 1
+#define FRF_AB_XM_EDRC_LBN 6
+#define FRF_AB_XM_EDRC_WIDTH 1
+#define FRF_AB_XM_AUTO_PAD_LBN 5
+#define FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define FRF_AB_XM_TX_PRMBL_LBN 2
+#define FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define FRF_AB_XM_TXEN_LBN 1
+#define FRF_AB_XM_TXEN_WIDTH 1
+#define FRF_AB_XM_TX_RST_LBN 0
+#define FRF_AB_XM_TX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_RX_CFG_REG(128bit):
+ * XGMAC receive configuration
+ */
+#define FR_AB_XM_RX_CFG_REG_OFST 0x00001240
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PASS_LENERR_LBN 26
+#define FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_REJ_BCAST_LBN 20
+#define FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define FRF_AB_XM_RXCRC_LBN 3
+#define FRF_AB_XM_RXCRC_WIDTH 1
+#define FRF_AB_XM_RX_PRMBL_LBN 2
+#define FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define FRF_AB_XM_RXEN_LBN 1
+#define FRF_AB_XM_RXEN_WIDTH 1
+#define FRF_AB_XM_RX_RST_LBN 0
+#define FRF_AB_XM_RX_RST_WIDTH 1
+
+
+/*
+ * FR_AB_XM_MGT_INT_MASK(128bit):
+ * documentation to be written for sum_XM_MGT_INT_MASK
+ */
+#define FR_AB_XM_MGT_INT_MASK_OFST 0x00001250
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XM_FC_REG(128bit):
+ * XGMAC flow control register
+ */
+#define FR_AB_XM_FC_REG_OFST 0x00001270
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_PAUSE_TIME_LBN 16
+#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define FRF_AB_XM_MCNTL_PASS_LBN 8
+#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define FRF_AB_XM_ZPAUSE_LBN 2
+#define FRF_AB_XM_ZPAUSE_WIDTH 1
+#define FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define FRF_AB_XM_DIS_FCNTL_LBN 0
+#define FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+
+/*
+ * FR_AB_XM_PAUSE_TIME_REG(128bit):
+ * XGMAC pause time register
+ */
+#define FR_AB_XM_PAUSE_TIME_REG_OFST 0x00001290
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+
+/*
+ * FR_AB_XM_TX_PARAM_REG(128bit):
+ * XGMAC transmit parameter register
+ */
+#define FR_AB_XM_TX_PARAM_REG_OFST 0x000012d0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define FRF_AB_XM_PAD_CHAR_LBN 0
+#define FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+
+/*
+ * FR_AB_XM_RX_PARAM_REG(128bit):
+ * XGMAC receive parameter register
+ */
+#define FR_AB_XM_RX_PARAM_REG_OFST 0x000012e0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+
+/*
+ * FR_AB_XM_MGT_INT_MSK_REG(128bit):
+ * XGMAC management interrupt mask register
+ */
+#define FR_AB_XM_MGT_INT_REG_OFST 0x000012f0
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define FRF_AB_XM_RMTFLT_LBN 1
+#define FRF_AB_XM_RMTFLT_WIDTH 1
+#define FRF_AB_XM_LCLFLT_LBN 0
+#define FRF_AB_XM_LCLFLT_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PWR_RST_REG(128bit):
+ * XGXS/XAUI powerdown/reset register
+ */
+#define FR_AB_XX_PWR_RST_REG_OFST 0x00001300
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_PWRDND_SIG_LBN 31
+#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define FRF_AB_XX_SIM_MODE_LBN 27
+#define FRF_AB_XX_SIM_MODE_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETD_SIG_LBN 23
+#define FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define FRF_AB_XX_RESETC_SIG_LBN 22
+#define FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define FRF_AB_XX_RESETB_SIG_LBN 21
+#define FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define FRF_AB_XX_RESETA_SIG_LBN 20
+#define FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define FRF_AB_XX_SD_RST_ACT_LBN 16
+#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define FRF_AB_XX_PWRDND_EN_LBN 15
+#define FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNC_EN_LBN 14
+#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNB_EN_LBN 13
+#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define FRF_AB_XX_PWRDNA_EN_LBN 12
+#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define FRF_AB_XX_RESETD_EN_LBN 7
+#define FRF_AB_XX_RESETD_EN_WIDTH 1
+#define FRF_AB_XX_RESETC_EN_LBN 6
+#define FRF_AB_XX_RESETC_EN_WIDTH 1
+#define FRF_AB_XX_RESETB_EN_LBN 5
+#define FRF_AB_XX_RESETB_EN_WIDTH 1
+#define FRF_AB_XX_RESETA_EN_LBN 4
+#define FRF_AB_XX_RESETA_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define FRF_AB_XX_RST_XX_EN_LBN 0
+#define FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_SD_CTL_REG(128bit):
+ * XGXS/XAUI powerdown/reset control register
+ */
+#define FR_AB_XX_SD_CTL_REG_OFST 0x00001310
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_TERMADJ1_LBN 17
+#define FRF_AB_XX_TERMADJ1_WIDTH 1
+#define FRF_AB_XX_TERMADJ0_LBN 16
+#define FRF_AB_XX_TERMADJ0_WIDTH 1
+#define FRF_AB_XX_HIDRVD_LBN 15
+#define FRF_AB_XX_HIDRVD_WIDTH 1
+#define FRF_AB_XX_LODRVD_LBN 14
+#define FRF_AB_XX_LODRVD_WIDTH 1
+#define FRF_AB_XX_HIDRVC_LBN 13
+#define FRF_AB_XX_HIDRVC_WIDTH 1
+#define FRF_AB_XX_LODRVC_LBN 12
+#define FRF_AB_XX_LODRVC_WIDTH 1
+#define FRF_AB_XX_HIDRVB_LBN 11
+#define FRF_AB_XX_HIDRVB_WIDTH 1
+#define FRF_AB_XX_LODRVB_LBN 10
+#define FRF_AB_XX_LODRVB_WIDTH 1
+#define FRF_AB_XX_HIDRVA_LBN 9
+#define FRF_AB_XX_HIDRVA_WIDTH 1
+#define FRF_AB_XX_LODRVA_LBN 8
+#define FRF_AB_XX_LODRVA_WIDTH 1
+#define FRF_AB_XX_LPBKD_LBN 3
+#define FRF_AB_XX_LPBKD_WIDTH 1
+#define FRF_AB_XX_LPBKC_LBN 2
+#define FRF_AB_XX_LPBKC_WIDTH 1
+#define FRF_AB_XX_LPBKB_LBN 1
+#define FRF_AB_XX_LPBKB_WIDTH 1
+#define FRF_AB_XX_LPBKA_LBN 0
+#define FRF_AB_XX_LPBKA_WIDTH 1
+
+
+/*
+ * FR_AB_XX_TXDRV_CTL_REG(128bit):
+ * XAUI SerDes transmit drive control register
+ */
+#define FR_AB_XX_TXDRV_CTL_REG_OFST 0x00001320
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_DEQD_LBN 28
+#define FRF_AB_XX_DEQD_WIDTH 4
+#define FRF_AB_XX_DEQC_LBN 24
+#define FRF_AB_XX_DEQC_WIDTH 4
+#define FRF_AB_XX_DEQB_LBN 20
+#define FRF_AB_XX_DEQB_WIDTH 4
+#define FRF_AB_XX_DEQA_LBN 16
+#define FRF_AB_XX_DEQA_WIDTH 4
+#define FRF_AB_XX_DTXD_LBN 12
+#define FRF_AB_XX_DTXD_WIDTH 4
+#define FRF_AB_XX_DTXC_LBN 8
+#define FRF_AB_XX_DTXC_WIDTH 4
+#define FRF_AB_XX_DTXB_LBN 4
+#define FRF_AB_XX_DTXB_WIDTH 4
+#define FRF_AB_XX_DTXA_LBN 0
+#define FRF_AB_XX_DTXA_WIDTH 4
+
+
+/*
+ * FR_AB_XX_PRBS_CTL_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_CTL_REG
+ */
+#define FR_AB_XX_PRBS_CTL_REG_OFST 0x00001330
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_CHK_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_CHK_REG
+ */
+#define FR_AB_XX_PRBS_CHK_REG_OFST 0x00001340
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_REV_LB_EN_LBN 16
+#define FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+
+/*
+ * FR_AB_XX_PRBS_ERR_REG(128bit):
+ * documentation to be written for sum_XX_PRBS_ERR_REG
+ */
+#define FR_AB_XX_PRBS_ERR_REG_OFST 0x00001350
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+
+/*
+ * FR_AB_XX_CORE_STAT_REG(128bit):
+ * XAUI XGXS core status register
+ */
+#define FR_AB_XX_CORE_STAT_REG_OFST 0x00001360
+/* falcona0,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+
+#define FRF_AB_XX_FORCE_SIG3_LBN 31
+#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_LBN 29
+#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_LBN 27
+#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_LBN 25
+#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define FRF_AB_XX_MATCH_FAULT_LBN 21
+#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define FRF_AB_XX_ALIGN_DONE_LBN 20
+#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT3_LBN 19
+#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT2_LBN 18
+#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT1_LBN 17
+#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define FRF_AB_XX_SYNC_STAT0_LBN 16
+#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH3_LBN 3
+#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH2_LBN 2
+#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH1_LBN 1
+#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define FRF_AB_XX_DISPERR_CH0_LBN 0
+#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+
+/*
+ * FR_AA_RX_DESC_PTR_TBL_KER(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AA_RX_DESC_PTR_TBL_KER_OFST 0x00011800
+/* falcona0=net_func_bar2 */
+#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_RX_DESC_PTR_TBL(128bit):
+ * Receive descriptor pointer table
+ */
+#define FR_AZ_RX_DESC_PTR_TBL_OFST 0x00f40000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_DESC_PTR_TBL_STEP 16
+#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define FR_AB_RX_DESC_PTR_TBL_ROWS 4096
+
+#define FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define FRF_AZ_RX_RESET_LBN 89
+#define FRF_AZ_RX_RESET_WIDTH 1
+#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define FFE_AZ_RX_DESCQ_SIZE_512 0
+#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define FRF_AZ_RX_DESCQ_EN_LBN 0
+#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+
+/*
+ * FR_AA_TX_DESC_PTR_TBL_KER(128bit):
+ * Transmit descriptor pointer table
+ */
+#define FR_AA_TX_DESC_PTR_TBL_KER_OFST 0x00011900
+/* falcona0=net_func_bar2 */
+#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/*
+ * FR_AZ_TX_DESC_PTR_TBL(128bit):
+ * Transmit descriptor pointer table
+ */
+#define FR_AZ_TX_DESC_PTR_TBL_OFST 0x00f50000
+/* falconb0=net_func_bar2,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TX_DESC_PTR_TBL_STEP 16
+#define FR_AB_TX_DESC_PTR_TBL_ROWS 4096
+#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define FRF_AZ_TX_DESCQ_EN_LBN 88
+#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define FFE_AZ_TX_DESCQ_SIZE_512 0
+#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+
+/*
+ * FR_AA_EVQ_PTR_TBL_KER(128bit):
+ * Event queue pointer table
+ */
+#define FR_AA_EVQ_PTR_TBL_KER_OFST 0x00011a00
+/* falcona0=net_func_bar2 */
+#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/*
+ * FR_AZ_EVQ_PTR_TBL(128bit):
+ * Event queue pointer table
+ */
+#define FR_AZ_EVQ_PTR_TBL_OFST 0x00f60000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_EVQ_PTR_TBL_STEP 16
+#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define FR_AB_EVQ_PTR_TBL_ROWS 4096
+
+#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_LBN 39
+#define FRF_AZ_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define FRF_AZ_EVQ_EN_LBN 23
+#define FRF_AZ_EVQ_EN_WIDTH 1
+#define FRF_AZ_EVQ_SIZE_LBN 20
+#define FRF_AZ_EVQ_SIZE_WIDTH 3
+#define FFE_AZ_EVQ_SIZE_32K 6
+#define FFE_AZ_EVQ_SIZE_16K 5
+#define FFE_AZ_EVQ_SIZE_8K 4
+#define FFE_AZ_EVQ_SIZE_4K 3
+#define FFE_AZ_EVQ_SIZE_2K 2
+#define FFE_AZ_EVQ_SIZE_1K 1
+#define FFE_AZ_EVQ_SIZE_512 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+
+/*
+ * FR_AA_BUF_HALF_TBL_KER(64bit):
+ * Buffer table in half buffer table mode, direct access by driver
+ */
+#define FR_AA_BUF_HALF_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_HALF_TBL(64bit):
+ * Buffer table in half buffer table mode, direct access by driver
+ */
+#define FR_AZ_BUF_HALF_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_HALF_TBL_STEP 8
+#define FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define FR_AB_BUF_HALF_TBL_ROWS 524288
+
+#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+
+/*
+ * FR_AA_BUF_FULL_TBL_KER(64bit):
+ * Buffer table in full buffer table mode, direct access by driver
+ */
+#define FR_AA_BUF_FULL_TBL_KER_OFST 0x00018000
+/* falcona0=net_func_bar2 */
+#define FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/*
+ * FR_AZ_BUF_FULL_TBL(64bit):
+ * Buffer table in full buffer table mode, direct access by driver
+ */
+#define FR_AZ_BUF_FULL_TBL_OFST 0x00800000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_BUF_FULL_TBL_STEP 8
+
+#define FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define FR_AB_BUF_FULL_TBL_ROWS 917504
+
+#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define FRF_AZ_BUF_ADR_REGION_LBN 48
+#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define FFE_AZ_BUF_ADR_REGN3 3
+#define FFE_AZ_BUF_ADR_REGN2 2
+#define FFE_AZ_BUF_ADR_REGN1 1
+#define FFE_AZ_BUF_ADR_REGN0 0
+#define FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define FRF_AZ_BUF_ADR_FBUF_DW0_LBN 14
+#define FRF_AZ_BUF_ADR_FBUF_DW0_WIDTH 32
+#define FRF_AZ_BUF_ADR_FBUF_DW1_LBN 46
+#define FRF_AZ_BUF_ADR_FBUF_DW1_WIDTH 2
+#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+
+/*
+ * FR_AZ_RX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AZ_RX_FILTER_TBL0_OFST 0x00f00000
+/* falconb0,sienaa0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_RX_FILTER_TBL0_STEP 32
+#define FR_AZ_RX_FILTER_TBL0_ROWS 8192
+/*
+ * FR_AB_RX_FILTER_TBL1(128bit):
+ * TCP/IPv4 Receive filter table
+ */
+#define FR_AB_RX_FILTER_TBL1_OFST 0x00f00010
+/* falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AB_RX_FILTER_TBL1_STEP 32
+#define FR_AB_RX_FILTER_TBL1_ROWS 8192
+
+#define FRF_BZ_RSS_EN_LBN 110
+#define FRF_BZ_RSS_EN_WIDTH 1
+#define FRF_BZ_SCATTER_EN_LBN 109
+#define FRF_BZ_SCATTER_EN_WIDTH 1
+#define FRF_AZ_TCP_UDP_LBN 108
+#define FRF_AZ_TCP_UDP_WIDTH 1
+#define FRF_AZ_RXQ_ID_LBN 96
+#define FRF_AZ_RXQ_ID_WIDTH 12
+#define FRF_AZ_DEST_IP_LBN 64
+#define FRF_AZ_DEST_IP_WIDTH 32
+#define FRF_AZ_DEST_PORT_TCP_LBN 48
+#define FRF_AZ_DEST_PORT_TCP_WIDTH 16
+#define FRF_AZ_SRC_IP_LBN 16
+#define FRF_AZ_SRC_IP_WIDTH 32
+#define FRF_AZ_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_AZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_RX_MAC_FILTER_TBL0(128bit):
+ * Receive Ethernet filter table
+ */
+#define FR_CZ_RX_MAC_FILTER_TBL0_OFST 0x00f00010
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_RMFT_RSS_EN_LBN 75
+#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define FRF_CZ_RMFT_DEST_MAC_DW0_LBN 12
+#define FRF_CZ_RMFT_DEST_MAC_DW0_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_DW1_LBN 44
+#define FRF_CZ_RMFT_DEST_MAC_DW1_WIDTH 16
+#define FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_AZ_TIMER_TBL(128bit):
+ * Timer table
+ */
+#define FR_AZ_TIMER_TBL_OFST 0x00f70000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_TIMER_TBL_STEP 16
+#define FR_CZ_TIMER_TBL_ROWS 1024
+#define FR_AB_TIMER_TBL_ROWS 4096
+
+#define FRF_CZ_TIMER_Q_EN_LBN 33
+#define FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define FRF_CZ_INT_ARMD_LBN 32
+#define FRF_CZ_INT_ARMD_WIDTH 1
+#define FRF_CZ_INT_PEND_LBN 31
+#define FRF_CZ_INT_PEND_WIDTH 1
+#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define FRF_CZ_TIMER_MODE_LBN 14
+#define FRF_CZ_TIMER_MODE_WIDTH 2
+#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define FFE_CZ_TIMER_MODE_TRIG_START 2
+#define FFE_CZ_TIMER_MODE_IMMED_START 1
+#define FFE_CZ_TIMER_MODE_DIS 0
+#define FRF_AB_TIMER_MODE_LBN 12
+#define FRF_AB_TIMER_MODE_WIDTH 2
+#define FFE_AB_TIMER_MODE_INT_HLDOFF 2
+#define FFE_AB_TIMER_MODE_TRIG_START 2
+#define FFE_AB_TIMER_MODE_IMMED_START 1
+#define FFE_AB_TIMER_MODE_DIS 0
+#define FRF_CZ_TIMER_VAL_LBN 0
+#define FRF_CZ_TIMER_VAL_WIDTH 14
+#define FRF_AB_TIMER_VAL_LBN 0
+#define FRF_AB_TIMER_VAL_WIDTH 12
+
+
+/*
+ * FR_BZ_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_BZ_TX_PACE_TBL_OFST 0x00f80000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2 */
+#define FR_AZ_TX_PACE_TBL_STEP 16
+#define FR_CZ_TX_PACE_TBL_ROWS 1024
+#define FR_BB_TX_PACE_TBL_ROWS 4096
+/*
+ * FR_AA_TX_PACE_TBL(128bit):
+ * Transmit pacing table
+ */
+#define FR_AA_TX_PACE_TBL_OFST 0x00f80040
+/* falcona0=char_func_bar0 */
+/* FR_AZ_TX_PACE_TBL_STEP 16 */
+#define FR_AA_TX_PACE_TBL_ROWS 4092
+
+#define FRF_AZ_TX_PACE_LBN 0
+#define FRF_AZ_TX_PACE_WIDTH 5
+
+
+/*
+ * FR_BZ_RX_INDIRECTION_TBL(7bit):
+ * RX Indirection Table
+ */
+#define FR_BZ_RX_INDIRECTION_TBL_OFST 0x00fb0000
+/* falconb0,sienaa0=net_func_bar2 */
+#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+
+#define FRF_BZ_IT_QUEUE_LBN 0
+#define FRF_BZ_IT_QUEUE_WIDTH 6
+
+
+/*
+ * FR_CZ_TX_FILTER_TBL0(128bit):
+ * TCP/IPv4 Transmit filter table
+ */
+#define FR_CZ_TX_FILTER_TBL0_OFST 0x00fc0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
+
+#define FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TIFT_DEST_IP_LBN 64
+#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define FRF_CZ_TIFT_SRC_IP_LBN 16
+#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+
+/*
+ * FR_CZ_TX_MAC_FILTER_TBL0(128bit):
+ * Transmit Ethernet filter table
+ */
+#define FR_CZ_TX_MAC_FILTER_TBL0_OFST 0x00fe0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+
+#define FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define FRF_CZ_TMFT_SRC_MAC_DW0_LBN 12
+#define FRF_CZ_TMFT_SRC_MAC_DW0_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_DW1_LBN 44
+#define FRF_CZ_TMFT_SRC_MAC_DW1_WIDTH 16
+#define FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+
+/*
+ * FR_CZ_MC_TREG_SMEM(32bit):
+ * MC Shared Memory
+ */
+#define FR_CZ_MC_TREG_SMEM_OFST 0x00ff0000
+/* sienaa0=net_func_bar2 */
+#define FR_CZ_MC_TREG_SMEM_STEP 4
+#define FR_CZ_MC_TREG_SMEM_ROWS 512
+
+#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_BB_MSIX_VECTOR_TABLE_OFST 0x00ff0000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/*
+ * FR_CZ_MSIX_VECTOR_TABLE(128bit):
+ * MSIX Vector Table
+ */
+#define FR_CZ_MSIX_VECTOR_TABLE_OFST 0x00000000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+
+#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+
+/*
+ * FR_BB_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_BB_MSIX_PBA_TABLE_OFST 0x00ff2000
+/* falconb0=net_func_bar2 */
+#define FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define FR_BB_MSIX_PBA_TABLE_ROWS 2
+/*
+ * FR_CZ_MSIX_PBA_TABLE(32bit):
+ * MSIX Pending Bit Array
+ */
+#define FR_CZ_MSIX_PBA_TABLE_OFST 0x00008000
+/* sienaa0=pci_f0_bar4 */
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
+
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+
+/*
+ * FR_AZ_SRM_DBG_REG(64bit):
+ * SRAM debug access
+ */
+#define FR_AZ_SRM_DBG_REG_OFST 0x03000000
+/* sienaa0=net_func_bar2,falconb0=net_func_bar2,falcona0=char_func_bar0 */
+#define FR_AZ_SRM_DBG_REG_STEP 8
+
+#define FR_CZ_SRM_DBG_REG_ROWS 262144
+#define FR_AB_SRM_DBG_REG_ROWS 2097152
+
+#define FRF_AZ_SRM_DBG_LBN 0
+#define FRF_AZ_SRM_DBG_WIDTH 64
+#define FRF_AZ_SRM_DBG_DW0_LBN 0
+#define FRF_AZ_SRM_DBG_DW0_WIDTH 32
+#define FRF_AZ_SRM_DBG_DW1_LBN 32
+#define FRF_AZ_SRM_DBG_DW1_WIDTH 32
+
+
+/*
+ * FR_AA_INT_ACK_CHAR(32bit):
+ * CHAR interrupt acknowledge register
+ */
+#define FR_AA_INT_ACK_CHAR_OFST 0x00000060
+/* falcona0=char_func_bar0 */
+
+#define FRF_AA_INT_ACK_CHAR_FIELD_LBN 0
+#define FRF_AA_INT_ACK_CHAR_FIELD_WIDTH 32
+
+
+/* FS_DRIVER_EV */
+#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define FSE_AZ_TX_DSC_ERROR_EV 15
+#define FSE_AZ_RX_DSC_ERROR_EV 14
+#define FSE_AZ_RX_RECOVER_EV 11
+#define FSE_AZ_TIMER_EV 10
+#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define FSE_AZ_WAKE_UP_EV 6
+#define FSE_AZ_SRM_UPD_DONE_EV 5
+#define FSE_AZ_EVQ_NOT_EN_EV 3
+#define FSE_AZ_EVQ_INIT_DONE_EV 2
+#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+
+/* FS_EVENT_ENTRY */
+#define FSF_AZ_EV_CODE_LBN 60
+#define FSF_AZ_EV_CODE_WIDTH 4
+#define FSE_AZ_EV_CODE_USER_EV 8
+#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define FSE_AZ_EV_CODE_DRIVER_EV 5
+#define FSE_AZ_EV_CODE_TX_EV 2
+#define FSE_AZ_EV_CODE_RX_EV 0
+#define FSF_AZ_EV_DATA_LBN 0
+#define FSF_AZ_EV_DATA_WIDTH 60
+#define FSF_AZ_EV_DATA_DW0_LBN 0
+#define FSF_AZ_EV_DATA_DW0_WIDTH 32
+#define FSF_AZ_EV_DATA_DW1_LBN 32
+#define FSF_AZ_EV_DATA_DW1_WIDTH 28
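+
+/*
+ * Usage sketch (illustrative only): event entries are decoded with the same
+ * *_LBN/*_WIDTH convention.  A hypothetical helper extracting a field from
+ * a 64-bit event word, e.g. to test for an RX completion event:
+ */
+static inline unsigned int
+example_extract_ev_field(uint64_t ev, unsigned int lbn, unsigned int width)
+{
+	/* Shift the field down to bit 0 and mask off the higher bits. */
+	return (unsigned int)((ev >> lbn) & ((1ULL << width) - 1));
+}
+
+/*
+ * For example:
+ *   if (example_extract_ev_field(ev, FSF_AZ_EV_CODE_LBN,
+ *                                FSF_AZ_EV_CODE_WIDTH) == FSE_AZ_EV_CODE_RX_EV)
+ *           ... handle an RX completion event ...
+ */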
+
+
+/* FS_GLOBAL_EV */
+#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 12
+#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_LBN 11
+#define FSF_BZ_GLB_EV_XG_MNT_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define FSF_AZ_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_LBN 9
+#define FSF_AZ_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_LBN 7
+#define FSF_AZ_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+
+/* FS_RX_EV */
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define FSF_AZ_RX_EV_PORT_LBN 30
+#define FSF_AZ_RX_EV_PORT_WIDTH 1
+#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define FSF_AZ_RX_EV_SOP_LBN 15
+#define FSF_AZ_RX_EV_SOP_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_RX_KER_DESC */
+#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_RX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_RX_USER_DESC */
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+
+/* FS_TX_EV */
+#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define FSF_AZ_TX_EV_PORT_LBN 16
+#define FSF_AZ_TX_EV_PORT_WIDTH 1
+#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define FSF_AZ_TX_EV_COMP_LBN 12
+#define FSF_AZ_TX_EV_COMP_WIDTH 1
+#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+
+/* FS_TX_KER_DESC */
+#define FSF_AZ_TX_KER_CONT_LBN 62
+#define FSF_AZ_TX_KER_CONT_WIDTH 1
+#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define FSF_AZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define FSF_AZ_TX_KER_BUF_ADDR_DW1_WIDTH 14
+
+
+/* FS_TX_USER_DESC */
+#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define FSF_AZ_TX_USER_CONT_LBN 46
+#define FSF_AZ_TX_USER_CONT_WIDTH 1
+#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+
+/* FS_USER_EV */
+#define FSF_CZ_USER_QID_LBN 32
+#define FSF_CZ_USER_QID_WIDTH 10
+#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+
+/* FS_NET_IVEC */
+#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+
+#define FR_AZ_TX_PACE_TBL_OFST FR_BZ_TX_PACE_TBL_OFST
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+
+
+#endif /* _SYS_EFX_REGS_H */
diff --git a/drivers/net/sfc/base/efx_regs_ef10.h b/drivers/net/sfc/base/efx_regs_ef10.h
new file mode 100644
index 00000000..11a91848
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_ef10.h
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_EF10_REGS_H
+#define _SYS_EFX_EF10_REGS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**************************************************************************
+ * NOTE: the line below marks the start of the autogenerated section
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/*
+ * BIU_HW_REV_ID_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_HW_REV_ID_REG_OFST 0x00000000
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_HW_REV_ID_REG_RESET 0xeb14face
+
+
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+
+/*
+ * BIU_MC_SFT_STATUS_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_OFST 0x00000010
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_ROWS 8
+#define ER_DZ_BIU_MC_SFT_STATUS_REG_RESET 0x1111face
+
+
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+
+/*
+ * BIU_INT_ISR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_BIU_INT_ISR_REG_OFST 0x00000090
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_BIU_INT_ISR_REG_RESET 0x0
+
+
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+
+/*
+ * MC_DB_LWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_LWRD_REG_OFST 0x00000200
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_LWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+
+/*
+ * MC_DB_HWRD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_MC_DB_HWRD_REG_OFST 0x00000204
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_MC_DB_HWRD_REG_RESET 0x0
+
+
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+
+/*
+ * EVQ_RPTR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_RPTR_REG_OFST 0x00000400
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_RPTR_REG_STEP 8192
+#define ER_DZ_EVQ_RPTR_REG_ROWS 2048
+#define ER_DZ_EVQ_RPTR_REG_RESET 0x0
+
+
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+
+/*
+ * EVQ_TMR_REG(32bit):
+ *
+ */
+
+#define ER_DZ_EVQ_TMR_REG_OFST 0x00000420
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_EVQ_TMR_REG_STEP 8192
+#define ER_DZ_EVQ_TMR_REG_ROWS 2048
+#define ER_DZ_EVQ_TMR_REG_RESET 0x0
+
+
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+
+/*
+ * RX_DESC_UPD_REG(32bit):
+ *
+ */
+
+#define ER_DZ_RX_DESC_UPD_REG_OFST 0x00000830
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_RX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_RX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_RX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/*
+ * TX_DESC_UPD_REG(96bit):
+ *
+ */
+
+#define ER_DZ_TX_DESC_UPD_REG_OFST 0x00000a10
+/* hunta0,medforda0=pcie_pf_bar2 */
+#define ER_DZ_TX_DESC_UPD_REG_STEP 8192
+#define ER_DZ_TX_DESC_UPD_REG_ROWS 2048
+#define ER_DZ_TX_DESC_UPD_REG_RESET 0x0
+
+
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+
+/* ES_DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_DW0_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_DW0_WIDTH 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_LBN 32
+#define ESF_DZ_DRV_SUB_DATA_DW1_WIDTH 24
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+
+/* ES_EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_DW0_LBN 0
+#define ESF_DZ_EV_DATA_DW0_WIDTH 32
+#define ESF_DZ_EV_DATA_DW1_LBN 32
+#define ESF_DZ_EV_DATA_DW1_WIDTH 28
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+
+/* ES_MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_DW0_LBN 0
+#define ESF_DZ_MC_SOFT_DW0_WIDTH 32
+#define ESF_DZ_MC_SOFT_DW1_LBN 32
+#define ESF_DZ_MC_SOFT_DW1_WIDTH 26
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+
+/* ES_RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DD_RX_EV_RSVD2_LBN 54
+#define ESF_DD_RX_EV_RSVD2_WIDTH 4
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_RX_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_RX_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_RX_EV_RSVD2_LBN 54
+#define ESF_EZ_RX_EV_RSVD2_WIDTH 2
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DD_RX_EV_SOFT1_LBN 32
+#define ESF_DD_RX_EV_SOFT1_WIDTH 3
+#define ESF_EZ_RX_EV_SOFT1_LBN 34
+#define ESF_EZ_RX_EV_SOFT1_WIDTH 1
+#define ESF_EZ_RX_ENCAP_HDR_LBN 32
+#define ESF_EZ_RX_ENCAP_HDR_WIDTH 2
+#define ESE_EZ_ENCAP_HDR_GRE 2
+#define ESE_EZ_ENCAP_HDR_VXLAN 1
+#define ESE_EZ_ENCAP_HDR_NONE 0
+#define ESF_DD_RX_EV_RSVD1_LBN 30
+#define ESF_DD_RX_EV_RSVD1_WIDTH 2
+#define ESF_EZ_RX_EV_RSVD1_LBN 31
+#define ESF_EZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_EZ_RX_ABORT_LBN 30
+#define ESF_EZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+
+/* ES_RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_RX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_LBN 8
+#define ESF_DZ_TX_OPTION_TS_AT_TXDP_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_LBN 7
+#define ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_LBN 6
+#define ESF_DZ_TX_OPTION_INNER_IP_CSUM_WIDTH 1
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+
+/* ES_TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DD_TX_EV_RSVD_LBN 48
+#define ESF_DD_TX_EV_RSVD_WIDTH 10
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_LBN 57
+#define ESF_EZ_TCP_UDP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_LBN 56
+#define ESF_EZ_IP_INNER_CHKSUM_ERR_WIDTH 1
+#define ESF_EZ_TX_EV_RSVD_LBN 48
+#define ESF_EZ_TX_EV_RSVD_WIDTH 8
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DD_TX_SOFT1_LBN 24
+#define ESF_DD_TX_SOFT1_WIDTH 8
+#define ESF_EZ_TX_CAN_MERGE_LBN 31
+#define ESF_EZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_EZ_TX_SOFT1_LBN 24
+#define ESF_EZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+
+/* ES_TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_DW0_WIDTH 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_LBN 32
+#define ESF_DZ_TX_KER_BUF_ADDR_DW1_WIDTH 16
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+
+/* ES_TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+
+/* ES_TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2A_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+
+/* TX_TSO_FATSO2B_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_OPTION_TYPE_LBN 56
+#define ESF_DZ_TX_TSO_OPTION_TYPE_WIDTH 4
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B 3
+#define ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A 2
+#define ESE_DZ_TX_TSO_OPTION_DESC_ENCAP 1
+#define ESE_DZ_TX_TSO_OPTION_DESC_NORMAL 0
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_LBN 16
+#define ESF_DZ_TX_TSO_OUTER_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_MSS_LBN 32
+#define ESF_DZ_TX_TSO_TCP_MSS_WIDTH 16
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_LBN 0
+#define ESF_DZ_TX_TSO_INNER_PE_CSUM_WIDTH 16
+
+
+/* ES_TX_VLAN_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_VLAN_OP_LBN 32
+#define ESF_DZ_TX_VLAN_OP_WIDTH 2
+#define ESF_DZ_TX_VLAN_TAG2_LBN 16
+#define ESF_DZ_TX_VLAN_TAG2_WIDTH 16
+#define ESF_DZ_TX_VLAN_TAG1_LBN 0
+#define ESF_DZ_TX_VLAN_TAG1_WIDTH 16
+
+
+/*************************************************************************
+ * NOTE: the comment line above marks the end of the autogenerated section
+ */
+
+/*
+ * The workaround for bug 35388 requires multiplexing writes through
+ * the ERF_DZ_TX_DESC_WPTR address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT_OFST (ER_DZ_TX_DESC_UPD_REG_OFST + 2 * 4)
+#define ER_DD_EVQ_INDIRECT_STEP ER_DZ_TX_DESC_UPD_REG_STEP
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
+
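+/* Illustrative sketch only (an assumption, not part of the register map):
+ * under this workaround a 16-bit EVQ read pointer is pushed as two dword
+ * writes to the indirect register, high bits first.  reg_write32() is a
+ * placeholder for whatever BAR write primitive the driver uses, so the
+ * helper is kept out of the build.
+ */
+#if 0
+static inline void
+evq_rptr_indirect_write(void *bar, unsigned int evq, unsigned int rptr)
+{
+	uint32_t hi = (EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH <<
+	    ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) | (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH);
+	uint32_t lo = (EFE_DD_EVQ_IND_RPTR_FLAGS_LOW <<
+	    ERF_DD_EVQ_IND_RPTR_FLAGS_LBN) |
+	    (rptr & ((1u << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+
+	reg_write32(bar, ER_DD_EVQ_INDIRECT_OFST + evq * ER_DD_EVQ_INDIRECT_STEP, hi);
+	reg_write32(bar, ER_DD_EVQ_INDIRECT_OFST + evq * ER_DD_EVQ_INDIRECT_STEP, lo);
+}
+#endif
+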
+/* Packed stream magic doorbell command */
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_LBN 11
+#define ERF_DZ_RX_DESC_MAGIC_DOORBELL_WIDTH 1
+
+#define ERF_DZ_RX_DESC_MAGIC_CMD_LBN 8
+#define ERF_DZ_RX_DESC_MAGIC_CMD_WIDTH 3
+#define ERE_DZ_RX_DESC_MAGIC_CMD_PS_CREDITS 0
+
+#define ERF_DZ_RX_DESC_MAGIC_DATA_LBN 0
+#define ERF_DZ_RX_DESC_MAGIC_DATA_WIDTH 8
+
+/* Packed stream RX packet prefix */
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_LBN 0
+#define ES_DZ_PS_RX_PREFIX_TSTAMP_WIDTH 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN 32
+#define ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH 16
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_LBN 48
+#define ES_DZ_PS_RX_PREFIX_ORIG_LEN_WIDTH 16
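+
+/* Illustrative sketch only: the prefix above is 8 bytes long.  A hypothetical
+ * helper extracting the captured length could look like this, assuming the
+ * prefix has been loaded into a 64-bit word in little-endian order; not
+ * compiled.
+ */
+#if 0
+static inline unsigned int
+ps_rx_prefix_cap_len(uint64_t prefix)
+{
+	return (unsigned int)(prefix >> ES_DZ_PS_RX_PREFIX_CAP_LEN_LBN) &
+	    ((1u << ES_DZ_PS_RX_PREFIX_CAP_LEN_WIDTH) - 1);
+}
+#endif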
+
+/*
+ * An extra flag for the packed stream mode,
+ * signalling the start of a new buffer
+ */
+#define ESF_DZ_RX_EV_ROTATE_LBN 53
+#define ESF_DZ_RX_EV_ROTATE_WIDTH 1
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_EF10_REGS_H */
diff --git a/drivers/net/sfc/base/efx_regs_mcdi.h b/drivers/net/sfc/base/efx_regs_mcdi.h
new file mode 100644
index 00000000..66896fbb
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_mcdi.h
@@ -0,0 +1,15690 @@
+/*-
+ * Copyright 2008-2013 Solarflare Communications Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*! \cidoxg_firmware_mc_cmd */
+
+#ifndef _SIENA_MC_DRIVER_PCOL_H
+#define _SIENA_MC_DRIVER_PCOL_H
+
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode. This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define MC_SMEM_P0_DOORBELL_OFST 0x000
+#define MC_SMEM_P1_DOORBELL_OFST 0x004
+/* The rest of these are firmware-defined */
+#define MC_SMEM_P0_PDU_OFST 0x008
+#define MC_SMEM_P1_PDU_OFST 0x108
+#define MC_SMEM_PDU_LEN 0x100
+#define MC_SMEM_P0_PTP_TIME_OFST 0x7f0
+#define MC_SMEM_P0_STATUS_OFST 0x7f8
+#define MC_SMEM_P1_STATUS_OFST 0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#ifdef WITH_MCDI_V2
+#define MCDI_PCOL_VERSION 2
+#else
+#define MCDI_PCOL_VERSION 1
+#endif
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ * 0 7 8 16 20 22 23 24 31
+ * | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ * | | |
+ * | | \--- Response
+ * | \------- Error
+ * \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
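+/* Illustrative sketch only (not part of the generated protocol definitions):
+ * a host driver composes the 32-bit header by shifting each field to its
+ * *_LBN position. The helper below is hypothetical (a real driver would
+ * typically have its own field-packing helpers) and is kept out of the build.
+ */
+#if 0
+static inline uint32_t
+mcdi_header_build(uint32_t code, uint32_t datalen, uint32_t seq)
+{
+	return (code << MCDI_HEADER_CODE_LBN) |
+	    (1u << MCDI_HEADER_RESYNC_LBN) |	/* resync is always set */
+	    (datalen << MCDI_HEADER_DATALEN_LBN) |
+	    (seq << MCDI_HEADER_SEQ_LBN);
+}
+#endif
+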
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#ifdef WITH_MCDI_V2
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+#else
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V1
+#endif
+
+
+/* The MC can generate events for two reasons:
+ * - To advance a shared memory request if XFLAGS_EVREQ was set
+ * - As a notification (link state, i2c event), controlled
+ * via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ * 0 32 33 36 44 52 60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ * |
+ * \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ * - LEVEL==INFO Command succeeded
+ * - LEVEL==ERR Command failed
+ *
+ * 0 8 16 24 32
+ * | Seq | Datalen | Errno | Rsvd |
+ *
+ * These fields are taken directly out of the standard MCDI header, i.e.,
+ * LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header. An event can be distinguished from an MCDI response by
+ * examining the first byte, which is 0xc0. This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ * 0 7 8
+ * | command | Resync | = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ * 56 60 63
+ * | Rsvd | Code | = 0xc0
+ *
+ * Which means for convenience the event code is 0xc for all MC
+ * generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
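+
+/* Illustrative sketch only: a hypothetical check that distinguishes such an
+ * event from an MCDI response by its leading (big-endian) byte, as described
+ * above; not compiled.
+ */
+#if 0
+static inline int
+mc_uart_bytes_are_event(const uint8_t *hdr)	/* big-endian byte order */
+{
+	return (hdr[0] >> 4) == FSE_AZ_EV_CODE_MCDI_EVRESPONSE;	/* 0xc */
+}
+#endif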
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN 0x100c
+/* The requested operation might require the
+ command to be passed between MCs, and the
+ transport doesn't support that. Should
+ only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the respective PROXY_RESPONSE event. */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so. */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+
+#define MC_CMD_ERR_CODE_OFST 0
+
+/* We define 8 "escape" commands to allow
+ for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0 0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1 0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2 0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3 0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4 0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5 0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6 0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7 0x7F
+
+/* Vectors in the boot ROM */
+/* Point to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
+ (1 << MC_CMD_READ32) | \
+ (1 << MC_CMD_WRITE32) | \
+ (1 << MC_CMD_COPYCODE) | \
+ (1 << MC_CMD_GET_VERSION), \
+ 0, 0, 0 }
+
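+/* Illustrative sketch only (an assumption about the layout): the initialiser
+ * above fills four 32-bit words with one bit per MCDI v0 command number.
+ * The hypothetical helper below tests whether the boot ROM advertises a
+ * given command; it is kept out of the build.
+ */
+#if 0
+static inline int
+mc_v0_cmd_supported(const uint32_t supported[4], unsigned int cmd)
+{
+	return (supported[cmd / 32] >> (cmd % 32)) & 1u;
+}
+#endif
+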
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x) \
+ (MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n) \
+ (MC_CMD_DBI_WRITE_IN_DBIWROP_OFST + \
+ MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST + \
+ (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n) (((n) & 0xff) << 16)
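+
+/* Usage sketch (hypothetical values): a driver addressing stack 3 on an EVB
+ * port would pass e.g.
+ *
+ *	vport_id = <EVB_PORT_ID_xxx constant> | EVB_STACK_ID(3);
+ *
+ * while leaving the stack bits at zero selects the default stack.
+ */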
+
+
+#ifdef WITH_MCDI_V2
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+#endif
+
+/* MCDI_EVENT structuredef */
+#define MCDI_EVENT_LEN 8
+#define MCDI_EVENT_CONT_LBN 32
+#define MCDI_EVENT_CONT_WIDTH 1
+#define MCDI_EVENT_LEVEL_LBN 33
+#define MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
+#define MCDI_EVENT_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: 100Mbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define MCDI_EVENT_FWALERT_DATA_LBN 8
+#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define MCDI_EVENT_FWALERT_REASON_LBN 0
+#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define MCDI_EVENT_FLR_VF_LBN 0
+#define MCDI_EVENT_FLR_VF_WIDTH 8
+#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that an invalid flash type was detected */
+#define MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define MCDI_EVENT_MUM_WATCHDOG 0x3
+#define MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_DATA_LBN 0
+#define MCDI_EVENT_DATA_WIDTH 32
+#define MCDI_EVENT_SRC_LBN 36
+#define MCDI_EVENT_SRC_WIDTH 8
+#define MCDI_EVENT_EV_CODE_LBN 60
+#define MCDI_EVENT_EV_CODE_WIDTH 4
+#define MCDI_EVENT_CODE_LBN 44
+#define MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
+#define MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_SECONDS_OFST 0
+#define MCDI_EVENT_PTP_SECONDS_LBN 0
+#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define MCDI_EVENT_PTP_MINOR_OFST 0
+#define MCDI_EVENT_PTP_MINOR_LBN 0
+#define MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define MCDI_EVENT_PTP_UUID_OFST 0
+#define MCDI_EVENT_PTP_UUID_LBN 0
+#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
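+
+/* Illustrative sketch only: a hypothetical helper pulling the response code
+ * out of a 64-bit MCDI event word with the LBN/WIDTH pair above; guarded out
+ * of the build.
+ */
+#if 0
+static inline unsigned int
+mcdi_proxy_response_rc(uint64_t ev)
+{
+	return (unsigned int)(ev >> MCDI_EVENT_PROXY_RESPONSE_RC_LBN) &
+	    ((1u << MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH) - 1);
+}
+#endif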
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PTP_STATE_OFST 0
+#define FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define FCDI_EVENT_PTP_STATE_LBN 0
+#define FCDI_EVENT_PTP_STATE_WIDTH 32
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid on a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
+
+/* MUM_EVENT structuredef */
+#define MUM_EVENT_LEN 8
+#define MUM_EVENT_CONT_LBN 32
+#define MUM_EVENT_CONT_WIDTH 1
+#define MUM_EVENT_LEVEL_LBN 33
+#define MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MUM_EVENT_LEVEL_FATAL 0x3
+#define MUM_EVENT_DATA_OFST 0
+#define MUM_EVENT_SENSOR_ID_LBN 0
+#define MUM_EVENT_SENSOR_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MUM_EVENT_SENSOR_STATE_LBN 8
+#define MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define MUM_EVENT_PORT_PHY_READY_LBN 0
+#define MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define MUM_EVENT_DATA_LBN 0
+#define MUM_EVENT_DATA_WIDTH 32
+#define MUM_EVENT_SRC_LBN 36
+#define MUM_EVENT_SRC_WIDTH 8
+#define MUM_EVENT_EV_CODE_LBN 60
+#define MUM_EVENT_EV_CODE_WIDTH 4
+#define MUM_EVENT_CODE_LBN 44
+#define MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define MUM_EVENT_SENSOR_DATA_OFST 0
+#define MUM_EVENT_SENSOR_DATA_LBN 0
+#define MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define MC_CMD_READ32_IN_LEN 8
+#define MC_CMD_READ32_IN_ADDR_OFST 0
+#define MC_CMD_READ32_IN_NUMWORDS_OFST 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define MC_CMD_READ32_OUT_LENMIN 4
+#define MC_CMD_READ32_OUT_LENMAX 252
+#define MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
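+
+/* Illustrative sketch (example only): filling in an MC_CMD_READ32 request and
+ * sizing the expected response with the generated OUT_LEN() macro. The
+ * example_* name and the req dword buffer are assumptions made for this
+ * sketch, not part of the protocol definitions.
+ */
+static inline uint32_t
+example_build_read32(uint32_t *req, uint32_t addr, uint32_t nwords)
+{
+        if (nwords > MC_CMD_READ32_OUT_BUFFER_MAXNUM)
+                nwords = MC_CMD_READ32_OUT_BUFFER_MAXNUM;
+        /* 8-byte request: address to read from and number of 32-bit words */
+        req[MC_CMD_READ32_IN_ADDR_OFST / 4] = addr;
+        req[MC_CMD_READ32_IN_NUMWORDS_OFST / 4] = nwords;
+        /* Response carries nwords dwords in BUFFER */
+        return MC_CMD_READ32_OUT_LEN(nwords);
+}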
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define MC_CMD_WRITE32_IN_LENMIN 8
+#define MC_CMD_WRITE32_IN_LENMAX 252
+#define MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
+#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define MC_CMD_COPYCODE_OUT_LEN 0
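+
+/* Illustrative sketch (example only): composing the SRC_ADDR "magic" word for
+ * MC_CMD_COPYCODE from the BOOT_MAGIC_* bitfields, assuming (as is the
+ * convention for these definitions) that the _LBN values are bit positions
+ * within the SRC_ADDR dword. The EXAMPLE_COPYCODE_FIELD() macro and the req
+ * buffer are introduced here for illustration only.
+ */
+#define EXAMPLE_COPYCODE_FIELD(field, value) \
+        (((uint32_t)(value) & ((1u << field ## _WIDTH) - 1u)) << field ## _LBN)
+
+static inline void
+example_build_copycode(uint32_t *req, uint32_t dest, uint32_t nwords)
+{
+        req[MC_CMD_COPYCODE_IN_SRC_ADDR_OFST / 4] =
+            EXAMPLE_COPYCODE_FIELD(MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT, 1) |
+            EXAMPLE_COPYCODE_FIELD(MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG, 1);
+        req[MC_CMD_COPYCODE_IN_DEST_ADDR_OFST / 4] = dest;
+        req[MC_CMD_COPYCODE_IN_NUMWORDS_OFST / 4] = nwords;
+        /* Return to the caller instead of jumping to the copied image */
+        req[MC_CMD_COPYCODE_IN_JUMP_OFST / 4] = MC_CMD_COPYCODE_JUMP_NONE;
+}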
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* Offset from which the MC booted, or BOOT_OFFSET_NULL if not flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
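+
+/* Illustrative sketch (example only): decoding the MC_CMD_GET_BOOT_STATUS
+ * response using the FLAGS _LBN/_WIDTH pairs and the BOOT_OFFSET_NULL
+ * sentinel. EXAMPLE_BOOT_FLAG() and the resp buffer are illustration-only
+ * names.
+ */
+#define EXAMPLE_BOOT_FLAG(dword, field) \
+        (((dword) >> field ## _LBN) & ((1u << field ## _WIDTH) - 1u))
+
+static inline int
+example_decode_boot_status(const uint32_t *resp)
+{
+        uint32_t flags = resp[MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST / 4];
+
+        if (resp[MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST / 4] ==
+            MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL)
+                return -1;      /* MC was not flash booted */
+        /* Non-zero if the watchdog caused the last reboot */
+        return EXAMPLE_BOOT_FLAG(flags,
+            MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG);
+}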
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
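+
+/* Illustrative sketch (example only): walking the MC_CMD_GET_ASSERTS
+ * response. Only GLOBAL_FLAGS is guaranteed, so the saved registers are only
+ * inspected when an assertion is actually reported, and entries equal to
+ * REG_NO_DATA are skipped. The example_* name and the resp buffer are
+ * assumptions made for this sketch.
+ */
+static inline unsigned int
+example_count_saved_regs(const uint32_t *resp)
+{
+        unsigned int i, valid = 0;
+
+        if (resp[MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST / 4] ==
+            MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+                return 0;       /* remaining fields are not valid */
+        for (i = 0; i < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; i++) {
+                uint32_t reg =
+                    resp[MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST / 4 + i];
+
+                if (reg != MC_CMD_GET_ASSERTS_REG_NO_DATA)
+                        valid++;        /* register value was captured */
+        }
+        return valid;
+}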
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about the MC firmware.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_VERSION_OUT_LEN 32
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128-bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128-bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
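+
+/* Illustrative sketch (example only): the 64-bit VERSION field is carried as
+ * little-endian LO/HI dwords, so it can be reassembled as below. The
+ * example_* name and the resp buffer are illustration-only.
+ */
+static inline uint64_t
+example_mc_fw_version(const uint32_t *resp)
+{
+        uint64_t lo = resp[MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST / 4];
+        uint64_t hi = resp[MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST / 4];
+
+        return (hi << 32) | lo;
+}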
+
+
+/***********************************/
+/* MC_CMD_FC
+ * Perform an FC operation
+ */
+#define MC_CMD_FC 0x9
+
+/* MC_CMD_FC_IN msgrequest */
+#define MC_CMD_FC_IN_LEN 4
+#define MC_CMD_FC_IN_OP_HDR_OFST 0
+#define MC_CMD_FC_IN_OP_LBN 0
+#define MC_CMD_FC_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to FC. */
+#define MC_CMD_FC_OP_NULL 0x1
+/* enum: Unused opcode */
+#define MC_CMD_FC_OP_UNUSED 0x2
+/* enum: MAC driver commands */
+#define MC_CMD_FC_OP_MAC 0x3
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_READ32 0x4
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_WRITE32 0x5
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_READ 0x6
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_WRITE 0x7
+/* enum: FC firmware Version */
+#define MC_CMD_FC_OP_GET_VERSION 0x8
+/* enum: Read FC memory */
+#define MC_CMD_FC_OP_TRC_RX_READ 0x9
+/* enum: Write to FC memory */
+#define MC_CMD_FC_OP_TRC_RX_WRITE 0xa
+/* enum: SFP parameters */
+#define MC_CMD_FC_OP_SFP 0xb
+/* enum: DDR3 test */
+#define MC_CMD_FC_OP_DDR_TEST 0xc
+/* enum: Get Crash context from FC */
+#define MC_CMD_FC_OP_GET_ASSERT 0xd
+/* enum: Get FPGA Build registers */
+#define MC_CMD_FC_OP_FPGA_BUILD 0xe
+/* enum: Read map support commands */
+#define MC_CMD_FC_OP_READ_MAP 0xf
+/* enum: FC Capabilities */
+#define MC_CMD_FC_OP_CAPABILITIES 0x10
+/* enum: FC Global flags */
+#define MC_CMD_FC_OP_GLOBAL_FLAGS 0x11
+/* enum: FC IO using relative addressing modes */
+#define MC_CMD_FC_OP_IO_REL 0x12
+/* enum: FPGA link information */
+#define MC_CMD_FC_OP_UHLINK 0x13
+/* enum: Configure loopbacks and link on FPGA ports */
+#define MC_CMD_FC_OP_SET_LINK 0x14
+/* enum: Licensing operations relating to AOE */
+#define MC_CMD_FC_OP_LICENSE 0x15
+/* enum: Startup information to the FC */
+#define MC_CMD_FC_OP_STARTUP 0x16
+/* enum: Configure a DMA read */
+#define MC_CMD_FC_OP_DMA 0x17
+/* enum: Configure a timed read */
+#define MC_CMD_FC_OP_TIMED_READ 0x18
+/* enum: Control UART logging */
+#define MC_CMD_FC_OP_LOG 0x19
+/* enum: Get the value of a given clock_id */
+#define MC_CMD_FC_OP_CLOCK 0x1a
+/* enum: DDR3/QDR3 parameters */
+#define MC_CMD_FC_OP_DDR 0x1b
+/* enum: PTP and timestamp control */
+#define MC_CMD_FC_OP_TIMESTAMP 0x1c
+/* enum: Commands for SPI Flash interface */
+#define MC_CMD_FC_OP_SPI 0x1d
+/* enum: Commands for diagnostic components */
+#define MC_CMD_FC_OP_DIAG 0x1e
+/* enum: External AOE port. */
+#define MC_CMD_FC_IN_PORT_EXT_OFST 0x0
+/* enum: Internal AOE port. */
+#define MC_CMD_FC_IN_PORT_INT_OFST 0x40
+
+/* MC_CMD_FC_IN_NULL msgrequest */
+#define MC_CMD_FC_IN_NULL_LEN 4
+#define MC_CMD_FC_IN_CMD_OFST 0
+
+/* MC_CMD_FC_IN_PHY msgrequest */
+#define MC_CMD_FC_IN_PHY_LEN 5
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC PHY driver operation code */
+#define MC_CMD_FC_IN_PHY_OP_OFST 4
+#define MC_CMD_FC_IN_PHY_OP_LEN 1
+/* enum: PHY init handler */
+#define MC_CMD_FC_OP_PHY_OP_INIT 0x1
+/* enum: PHY reconfigure handler */
+#define MC_CMD_FC_OP_PHY_OP_RECONFIGURE 0x2
+/* enum: PHY reboot handler */
+#define MC_CMD_FC_OP_PHY_OP_REBOOT 0x3
+/* enum: PHY get_supported_cap handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_SUPPORTED_CAP 0x4
+/* enum: PHY get_config handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONFIG 0x5
+/* enum: PHY get_media_info handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_MEDIA_INFO 0x6
+/* enum: PHY set_led handler */
+#define MC_CMD_FC_OP_PHY_OP_SET_LED 0x7
+/* enum: PHY lasi_interrupt handler */
+#define MC_CMD_FC_OP_PHY_OP_LASI_INTERRUPT 0x8
+/* enum: PHY check_link handler */
+#define MC_CMD_FC_OP_PHY_OP_CHECK_LINK 0x9
+/* enum: PHY fill_stats handler */
+#define MC_CMD_FC_OP_PHY_OP_FILL_STATS 0xa
+/* enum: PHY bpx_link_state_changed handler */
+#define MC_CMD_FC_OP_PHY_OP_BPX_LINK_STATE_CHANGED 0xb
+/* enum: PHY get_state handler */
+#define MC_CMD_FC_OP_PHY_OP_GET_STATE 0xc
+/* enum: PHY start_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_START_BIST 0xd
+/* enum: PHY poll_bist handler */
+#define MC_CMD_FC_OP_PHY_OP_POLL_BIST 0xe
+/* enum: PHY nvram_test handler */
+#define MC_CMD_FC_OP_PHY_OP_NVRAM_TEST 0xf
+/* enum: PHY relinquish handler */
+#define MC_CMD_FC_OP_PHY_OP_RELINQUISH_SPI 0x10
+/* enum: PHY read connection from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_CONNECTION 0x11
+/* enum: PHY read flags from FC - may not be required */
+#define MC_CMD_FC_OP_PHY_OP_GET_FLAGS 0x12
+
+/* MC_CMD_FC_IN_PHY_INIT msgrequest */
+#define MC_CMD_FC_IN_PHY_INIT_LEN 4
+#define MC_CMD_FC_IN_PHY_CMD_OFST 0
+
+/* MC_CMD_FC_IN_MAC msgrequest */
+#define MC_CMD_FC_IN_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_MAC_HEADER_OFST 4
+#define MC_CMD_FC_IN_MAC_OP_LBN 0
+#define MC_CMD_FC_IN_MAC_OP_WIDTH 8
+/* enum: MAC reconfigure handler */
+#define MC_CMD_FC_OP_MAC_OP_RECONFIGURE 0x1
+/* enum: MAC Set command - same as MC_CMD_SET_MAC */
+#define MC_CMD_FC_OP_MAC_OP_SET_LINK 0x2
+/* enum: MAC statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_STATS 0x3
+/* enum: MAC RX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_RX_STATS 0x6
+/* enum: MAC TX statistics */
+#define MC_CMD_FC_OP_MAC_OP_GET_TX_STATS 0x7
+/* enum: MAC Read status */
+#define MC_CMD_FC_OP_MAC_OP_READ_STATUS 0x8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_MAC_PORT_TYPE_WIDTH 8
+/* enum: External FPGA port. */
+#define MC_CMD_FC_PORT_EXT 0x0
+/* enum: Internal Siena-facing FPGA ports. */
+#define MC_CMD_FC_PORT_INT 0x1
+#define MC_CMD_FC_IN_MAC_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_MAC_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_MAC_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE 0x1
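+
+/* Illustrative sketch (example only): packing the MC_CMD_FC_IN_MAC header
+ * dword from its OP/PORT_TYPE/PORT_IDX/CMD_FORMAT sub-fields, using the
+ * PORT_OVERRIDE format so that PORT_TYPE and PORT_IDX are honoured. The
+ * EXAMPLE_FC_FIELD() macro and the req buffer are illustration-only names;
+ * the other bits of OP_HDR are left clear in this sketch.
+ */
+#define EXAMPLE_FC_FIELD(field, value) \
+        (((uint32_t)(value) & ((1u << field ## _WIDTH) - 1u)) << field ## _LBN)
+
+static inline void
+example_fc_mac_header(uint32_t *req, uint32_t mac_op, uint32_t port_idx)
+{
+        req[MC_CMD_FC_IN_OP_HDR_OFST / 4] =
+            EXAMPLE_FC_FIELD(MC_CMD_FC_IN_OP, MC_CMD_FC_OP_MAC);
+        req[MC_CMD_FC_IN_MAC_HEADER_OFST / 4] =
+            EXAMPLE_FC_FIELD(MC_CMD_FC_IN_MAC_OP, mac_op) |
+            EXAMPLE_FC_FIELD(MC_CMD_FC_IN_MAC_PORT_TYPE, MC_CMD_FC_PORT_EXT) |
+            EXAMPLE_FC_FIELD(MC_CMD_FC_IN_MAC_PORT_IDX, port_idx) |
+            EXAMPLE_FC_FIELD(MC_CMD_FC_IN_MAC_CMD_FORMAT,
+                MC_CMD_FC_OP_MAC_CMD_FORMAT_PORT_OVERRIDE);
+}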
+
+/* MC_CMD_FC_IN_MAC_RECONFIGURE msgrequest */
+#define MC_CMD_FC_IN_MAC_RECONFIGURE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_MAC_SET_LINK_LEN 32
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MTU size */
+#define MC_CMD_FC_IN_MAC_SET_LINK_MTU_OFST 8
+/* Drain Tx FIFO */
+#define MC_CMD_FC_IN_MAC_SET_LINK_DRAIN_OFST 12
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LEN 8
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_LO_OFST 16
+#define MC_CMD_FC_IN_MAC_SET_LINK_ADDR_HI_OFST 20
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_OFST 24
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_LBN 0
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_UNICAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_LBN 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_REJECT_BRDCAST_WIDTH 1
+#define MC_CMD_FC_IN_MAC_SET_LINK_FCNTL_OFST 28
+
+/* MC_CMD_FC_IN_MAC_READ_STATUS msgrequest */
+#define MC_CMD_FC_IN_MAC_READ_STATUS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_RX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_RX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_TX_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_TX_STATS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_MAC_GET_STATS msgrequest */
+#define MC_CMD_FC_IN_MAC_GET_STATS_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_MAC_HEADER_OFST 4 */
+/* MC Statistics index */
+#define MC_CMD_FC_IN_MAC_GET_STATS_STATS_INDEX_OFST 8
+#define MC_CMD_FC_IN_MAC_GET_STATS_FLAGS_OFST 12
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_LBN 0
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_ALL_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_LBN 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_CLEAR_WIDTH 1
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_LBN 2
+#define MC_CMD_FC_IN_MAC_GET_STATS_UPDATE_WIDTH 1
+/* Number of statistics to read */
+#define MC_CMD_FC_IN_MAC_GET_STATS_NUM_OFST 16
+#define MC_CMD_FC_MAC_NSTATS_PER_BLOCK 0x1e /* enum */
+#define MC_CMD_FC_MAC_NBYTES_PER_STAT 0x8 /* enum */
+
+/* MC_CMD_FC_IN_READ32 msgrequest */
+#define MC_CMD_FC_IN_READ32_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_READ32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_READ32_NUMWORDS_OFST 12
+
+/* MC_CMD_FC_IN_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_WRITE32_LENMIN 16
+#define MC_CMD_FC_IN_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_WRITE32_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_WRITE32_ADDR_HI_OFST 4
+#define MC_CMD_FC_IN_WRITE32_ADDR_LO_OFST 8
+#define MC_CMD_FC_IN_WRITE32_BUFFER_OFST 12
+#define MC_CMD_FC_IN_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_WRITE32_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_TRC_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_WRITE_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_WRITE_DATA_NUM 4
+
+/* MC_CMD_FC_IN_GET_VERSION msgrequest */
+#define MC_CMD_FC_IN_GET_VERSION_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_TRC_RX_READ msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_READ_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_READ_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_READ_CHANNEL_OFST 8
+
+/* MC_CMD_FC_IN_TRC_RX_WRITE msgrequest */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TRC_RX_WRITE_TRC_OFST 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_CHANNEL_OFST 8
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_OFST 12
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_LEN 4
+#define MC_CMD_FC_IN_TRC_RX_WRITE_DATA_NUM 2
+
+/* MC_CMD_FC_IN_SFP msgrequest */
+#define MC_CMD_FC_IN_SFP_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Link speed is 100, 1000, 10000 or 40000 (Mbit/s) */
+#define MC_CMD_FC_IN_SFP_SPEED_OFST 4
+/* Length of copper cable - zero when not relevant (e.g. if cable is fibre) */
+#define MC_CMD_FC_IN_SFP_COPPER_LEN_OFST 8
+/* Not relevant for cards with QSFP modules. For older cards, true if module is
+ * a dual speed SFP+ module.
+ */
+#define MC_CMD_FC_IN_SFP_DUAL_SPEED_OFST 12
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_IN_SFP_PRESENT_OFST 16
+/* The type of the SFP+ Module. For later cards with QSFP modules, this field
+ * is unused and the type is communicated by other means.
+ */
+#define MC_CMD_FC_IN_SFP_TYPE_OFST 20
+/* Capabilities corresponding to 1 bits. */
+#define MC_CMD_FC_IN_SFP_CAPS_OFST 24
+
+/* MC_CMD_FC_IN_DDR_TEST msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4
+#define MC_CMD_FC_IN_DDR_TEST_OP_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_OP_WIDTH 8
+/* enum: DRAM Test Start */
+#define MC_CMD_FC_OP_DDR_TEST_START 0x1
+/* enum: DRAM Test Poll */
+#define MC_CMD_FC_OP_DDR_TEST_POLL 0x2
+
+/* MC_CMD_FC_IN_DDR_TEST_START msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_START_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_DDR_TEST_START_MASK_OFST 8
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_LBN 0
+#define MC_CMD_FC_IN_DDR_TEST_START_T0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_LBN 1
+#define MC_CMD_FC_IN_DDR_TEST_START_T1_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_LBN 2
+#define MC_CMD_FC_IN_DDR_TEST_START_B0_WIDTH 1
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_LBN 3
+#define MC_CMD_FC_IN_DDR_TEST_START_B1_WIDTH 1
+
+/* MC_CMD_FC_IN_DDR_TEST_POLL msgrequest */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_LEN 12
+#define MC_CMD_FC_IN_DDR_TEST_CMD_OFST 0
+/* MC_CMD_FC_IN_DDR_TEST_HEADER_OFST 4 */
+/* Clear previous test result and prepare for restarting DDR test */
+#define MC_CMD_FC_IN_DDR_TEST_POLL_CLEAR_RESULT_FOR_DDR_TEST_OFST 8
+
+/* MC_CMD_FC_IN_GET_ASSERT msgrequest */
+#define MC_CMD_FC_IN_GET_ASSERT_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_FPGA_BUILD msgrequest */
+#define MC_CMD_FC_IN_FPGA_BUILD_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FPGA build info operation code */
+#define MC_CMD_FC_IN_FPGA_BUILD_OP_OFST 4
+/* enum: Get the build registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD 0x1
+/* enum: Get the services registers */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES 0x2
+/* enum: Get the BSP version */
+#define MC_CMD_FC_IN_FPGA_BUILD_BSP_VERSION 0x3
+/* enum: Get the build registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_BUILD_V2 0x4
+/* enum: Get the services registers for V2 (SFA974X) */
+#define MC_CMD_FC_IN_FPGA_BUILD_SERVICES_V2 0x5
+
+/* MC_CMD_FC_IN_READ_MAP msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4
+#define MC_CMD_FC_IN_READ_MAP_OP_LBN 0
+#define MC_CMD_FC_IN_READ_MAP_OP_WIDTH 8
+/* enum: Get the number of map regions */
+#define MC_CMD_FC_OP_READ_MAP_COUNT 0x1
+/* enum: Get the specified map */
+#define MC_CMD_FC_OP_READ_MAP_INDEX 0x2
+
+/* MC_CMD_FC_IN_READ_MAP_COUNT msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_COUNT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_READ_MAP_INDEX msgrequest */
+#define MC_CMD_FC_IN_READ_MAP_INDEX_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_READ_MAP_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_MAP_INDEX_OFST 8
+
+/* MC_CMD_FC_IN_CAPABILITIES msgrequest */
+#define MC_CMD_FC_IN_CAPABILITIES_LEN 4
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+
+/* MC_CMD_FC_IN_GLOBAL_FLAGS msgrequest */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_FLAGS_OFST 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_LBN 0
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_CABLE_PLUGGED_IN_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_LBN 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_RX_TUNING_LINK_MONITORING_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_LBN 2
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_DFE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_LBN 3
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_EYE_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_LBN 4
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_1D_TUNING_ENABLE_WIDTH 1
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_LBN 5
+#define MC_CMD_FC_IN_GLOBAL_FLAGS_OFFCAL_ENABLE_WIDTH 1
+
+/* MC_CMD_FC_IN_IO_REL msgrequest */
+#define MC_CMD_FC_IN_IO_REL_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_IO_REL_HEADER_OFST 4
+#define MC_CMD_FC_IN_IO_REL_OP_LBN 0
+#define MC_CMD_FC_IN_IO_REL_OP_WIDTH 8
+/* enum: Get the base address that the FC applies to relative commands */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR 0x1
+/* enum: Read data */
+#define MC_CMD_FC_IN_IO_REL_READ32 0x2
+/* enum: Write data */
+#define MC_CMD_FC_IN_IO_REL_WRITE32 0x3
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_LBN 8
+#define MC_CMD_FC_IN_IO_REL_COMP_TYPE_WIDTH 8
+/* enum: Application address space */
+#define MC_CMD_FC_COMP_TYPE_APP_ADDR_SPACE 0x1
+/* enum: Flash address space */
+#define MC_CMD_FC_COMP_TYPE_FLASH 0x2
+
+/* MC_CMD_FC_IN_IO_REL_GET_ADDR msgrequest */
+#define MC_CMD_FC_IN_IO_REL_GET_ADDR_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+
+/* MC_CMD_FC_IN_IO_REL_READ32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_READ32_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_READ32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_READ32_NUMWORDS_OFST 16
+
+/* MC_CMD_FC_IN_IO_REL_WRITE32 msgrequest */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMIN 20
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LENMAX 252
+#define MC_CMD_FC_IN_IO_REL_WRITE32_LEN(num) (16+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_IO_REL_HEADER_OFST 4 */
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_HI_OFST 8
+#define MC_CMD_FC_IN_IO_REL_WRITE32_ADDR_LO_OFST 12
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_OFST 16
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_LEN 4
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_IO_REL_WRITE32_BUFFER_MAXNUM 59
+
+/* MC_CMD_FC_IN_UHLINK msgrequest */
+#define MC_CMD_FC_IN_UHLINK_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_UHLINK_HEADER_OFST 4
+#define MC_CMD_FC_IN_UHLINK_OP_LBN 0
+#define MC_CMD_FC_IN_UHLINK_OP_WIDTH 8
+/* enum: Get PHY configuration info */
+#define MC_CMD_FC_OP_UHLINK_PHY 0x1
+/* enum: Get MAC configuration info */
+#define MC_CMD_FC_OP_UHLINK_MAC 0x2
+/* enum: Get Rx eye table */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE 0x3
+/* enum: Trigger capture of an Rx eye plot */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT 0x4
+/* enum: Read back the captured Rx eye plot data */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT 0x5
+/* enum: Retune Rx settings */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE 0x6
+/* enum: Set loopback mode on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET 0x7
+/* enum: Get loopback mode config state on fpga port */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET 0x8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_LBN 8
+#define MC_CMD_FC_IN_UHLINK_PORT_TYPE_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_LBN 16
+#define MC_CMD_FC_IN_UHLINK_PORT_IDX_WIDTH 8
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_LBN 24
+#define MC_CMD_FC_IN_UHLINK_CMD_FORMAT_WIDTH 8
+/* enum: Default FC command format; the fields PORT_TYPE and PORT_IDX are
+ * irrelevant. Port number is derived from pci_fn; passed in FC header.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_DEFAULT 0x0
+/* enum: Override default port number. Port number determined by fields
+ * PORT_TYPE and PORT_IDX.
+ */
+#define MC_CMD_FC_OP_UHLINK_CMD_FORMAT_PORT_OVERRIDE 0x1
+
+/* MC_CMD_FC_OP_UHLINK_PHY msgrequest */
+#define MC_CMD_FC_OP_UHLINK_PHY_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_MAC msgrequest */
+#define MC_CMD_FC_OP_UHLINK_MAC_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_RX_EYE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_RX_EYE_INDEX_OFST 8
+#define MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK 0x30 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_DUMP_RX_EYE_PLOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT msgrequest */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_DC_GAIN_OFST 8
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_EQ_CONTROL_OFST 12
+#define MC_CMD_FC_OP_UHLINK_READ_RX_EYE_PLOT_INDEX_OFST 16
+#define MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK 0x1e /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_RX_TUNE msgrequest */
+#define MC_CMD_FC_OP_UHLINK_RX_TUNE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_SET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_TYPE_OFST 8
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PCS_SERIAL 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_PRE_CDR 0x1 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_TYPE_PMA_POST_CDR 0x2 /* enum */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_SET_STATE_OFST 12
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_OFF 0x0 /* enum */
+#define MC_CMD_FC_UHLINK_LOOPBACK_STATE_ON 0x1 /* enum */
+
+/* MC_CMD_FC_OP_UHLINK_LOOPBACK_GET msgrequest */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_UHLINK_HEADER_OFST 4 */
+#define MC_CMD_FC_OP_UHLINK_LOOPBACK_GET_TYPE_OFST 8
+
+/* MC_CMD_FC_IN_SET_LINK msgrequest */
+#define MC_CMD_FC_IN_SET_LINK_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* See MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_FC_IN_SET_LINK_MODE_OFST 4
+#define MC_CMD_FC_IN_SET_LINK_SPEED_OFST 8
+#define MC_CMD_FC_IN_SET_LINK_FLAGS_OFST 12
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_LBN 0
+#define MC_CMD_FC_IN_SET_LINK_LOWPOWER_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_LBN 1
+#define MC_CMD_FC_IN_SET_LINK_POWEROFF_WIDTH 1
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_LBN 2
+#define MC_CMD_FC_IN_SET_LINK_TXDIS_WIDTH 1
+
+/* MC_CMD_FC_IN_LICENSE msgrequest */
+#define MC_CMD_FC_IN_LICENSE_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LICENSE_OP_OFST 4
+#define MC_CMD_FC_IN_LICENSE_UPDATE_LICENSE 0x0 /* enum */
+#define MC_CMD_FC_IN_LICENSE_GET_KEY_STATS 0x1 /* enum */
+
+/* MC_CMD_FC_IN_STARTUP msgrequest */
+#define MC_CMD_FC_IN_STARTUP_LEN 40
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_STARTUP_BASE_OFST 4
+#define MC_CMD_FC_IN_STARTUP_LENGTH_OFST 8
+/* Length of identifier */
+#define MC_CMD_FC_IN_STARTUP_IDLENGTH_OFST 12
+/* Identifier for AOE FPGA */
+#define MC_CMD_FC_IN_STARTUP_ID_OFST 16
+#define MC_CMD_FC_IN_STARTUP_ID_LEN 1
+#define MC_CMD_FC_IN_STARTUP_ID_NUM 24
+
+/* MC_CMD_FC_IN_DMA msgrequest */
+#define MC_CMD_FC_IN_DMA_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DMA_OP_OFST 4
+#define MC_CMD_FC_IN_DMA_STOP 0x0 /* enum */
+#define MC_CMD_FC_IN_DMA_READ 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DMA_STOP msgrequest */
+#define MC_CMD_FC_IN_DMA_STOP_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_DMA_STOP_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_DMA_READ msgrequest */
+#define MC_CMD_FC_IN_DMA_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DMA_OP_OFST 4 */
+#define MC_CMD_FC_IN_DMA_READ_OFFSET_OFST 8
+#define MC_CMD_FC_IN_DMA_READ_LENGTH_OFST 12
+
+/* MC_CMD_FC_IN_TIMED_READ msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMED_READ_OP_OFST 4
+#define MC_CMD_FC_IN_TIMED_READ_SET 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_GET 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR 0x2 /* enum */
+
+/* MC_CMD_FC_IN_TIMED_READ_SET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_SET_LEN 52
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST 8
+/* Address into which to transfer data in host */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST 16
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST 20
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST 24
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST 28
+/* Length of host transfer (total) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST 32
+/* Offset back from aoe_address to apply operation to */
+#define MC_CMD_FC_IN_TIMED_READ_SET_OFFSET_OFST 36
+/* Data to apply at offset */
+#define MC_CMD_FC_IN_TIMED_READ_SET_DATA_OFST 40
+#define MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST 44
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_LBN 0
+#define MC_CMD_FC_IN_TIMED_READ_SET_INDIRECT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_LBN 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_DOUBLE_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_LBN 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_EVENT_WIDTH 1
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN 3
+#define MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_WIDTH 2
+#define MC_CMD_FC_IN_TIMED_READ_SET_NONE 0x0 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READ 0x1 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_WRITE 0x2 /* enum */
+#define MC_CMD_FC_IN_TIMED_READ_SET_READWRITE 0x3 /* enum */
+/* Period at which reads are performed (100ms units) */
+#define MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST 48
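+
+/* Illustrative sketch (example only): filling a TIMED_READ_SET request. The
+ * 64-bit host DMA and AOE addresses are carried as LO/HI dword pairs, and
+ * PREREAD is a two-bit field inside FLAGS. The example_* name, the req
+ * buffer (assumed zero-initialised for the 52-byte request, so OFFSET/DATA
+ * stay zero) and the choice of a read-only preread with no completion event
+ * are assumptions made for this sketch.
+ */
+static inline void
+example_fc_timed_read_set(uint32_t *req, uint32_t handle, uint64_t host_addr,
+                          uint64_t aoe_addr, uint32_t len, uint32_t period)
+{
+        req[MC_CMD_FC_IN_CMD_OFST / 4] = MC_CMD_FC_OP_TIMED_READ;
+        req[MC_CMD_FC_IN_TIMED_READ_OP_OFST / 4] = MC_CMD_FC_IN_TIMED_READ_SET;
+        req[MC_CMD_FC_IN_TIMED_READ_SET_HOST_HANDLE_OFST / 4] = handle;
+        req[MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_LO_OFST / 4] =
+            (uint32_t)host_addr;
+        req[MC_CMD_FC_IN_TIMED_READ_SET_HOST_DMA_ADDRESS_HI_OFST / 4] =
+            (uint32_t)(host_addr >> 32);
+        req[MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_LO_OFST / 4] =
+            (uint32_t)aoe_addr;
+        req[MC_CMD_FC_IN_TIMED_READ_SET_AOE_ADDRESS_HI_OFST / 4] =
+            (uint32_t)(aoe_addr >> 32);
+        req[MC_CMD_FC_IN_TIMED_READ_SET_AOE_LENGTH_OFST / 4] = len;
+        req[MC_CMD_FC_IN_TIMED_READ_SET_HOST_LENGTH_OFST / 4] = len;
+        /* PREREAD=READ; INDIRECT, DOUBLE and EVENT left clear */
+        req[MC_CMD_FC_IN_TIMED_READ_SET_FLAGS_OFST / 4] =
+            MC_CMD_FC_IN_TIMED_READ_SET_READ <<
+            MC_CMD_FC_IN_TIMED_READ_SET_PREREAD_LBN;
+        /* Read period in 100ms units */
+        req[MC_CMD_FC_IN_TIMED_READ_SET_PERIOD_OFST / 4] = period;
+}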
+
+/* MC_CMD_FC_IN_TIMED_READ_GET msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_GET_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_GET_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_TIMED_READ_CLEAR msgrequest */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_TIMED_READ_OP_OFST 4 */
+/* FC supplied handle */
+#define MC_CMD_FC_IN_TIMED_READ_CLEAR_FC_HANDLE_OFST 8
+
+/* MC_CMD_FC_IN_LOG msgrequest */
+#define MC_CMD_FC_IN_LOG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_LOG_OP_OFST 4
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE 0x0 /* enum */
+#define MC_CMD_FC_IN_LOG_JTAG_UART 0x1 /* enum */
+
+/* MC_CMD_FC_IN_LOG_ADDR_RANGE msgrequest */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Partition offset into flash */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_OFFSET_OFST 8
+/* Partition length */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_LENGTH_OFST 12
+/* Partition erase size */
+#define MC_CMD_FC_IN_LOG_ADDR_RANGE_ERASE_SIZE_OFST 16
+
+/* MC_CMD_FC_IN_LOG_JTAG_UART msgrequest */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_LOG_OP_OFST 4 */
+/* Enable/disable printing to JTAG UART */
+#define MC_CMD_FC_IN_LOG_JTAG_UART_ENABLE_OFST 8
+
+/* MC_CMD_FC_IN_CLOCK msgrequest */
+#define MC_CMD_FC_IN_CLOCK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_CLOCK_OP_OFST 4
+#define MC_CMD_FC_IN_CLOCK_GET_TIME 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME 0x1 /* enum */
+/* Clock to operate on */
+#define MC_CMD_FC_IN_CLOCK_ID_OFST 8
+#define MC_CMD_FC_IN_CLOCK_STATS 0x0 /* enum */
+#define MC_CMD_FC_IN_CLOCK_MAC 0x1 /* enum */
+
+/* MC_CMD_FC_IN_CLOCK_GET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_GET_TIME_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* Retrieve the clock value of the specified clock */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+
+/* MC_CMD_FC_IN_CLOCK_SET_TIME msgrequest */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_CLOCK_OP_OFST 4 */
+/* MC_CMD_FC_IN_CLOCK_ID_OFST 8 */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_LO_OFST 12
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_SECONDS_HI_OFST 16
+/* Set the clock value of the specified clock */
+#define MC_CMD_FC_IN_CLOCK_SET_TIME_NANOSECONDS_OFST 20
+
+/* MC_CMD_FC_IN_DDR msgrequest */
+#define MC_CMD_FC_IN_DDR_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DDR_OP_OFST 4
+#define MC_CMD_FC_IN_DDR_SET_SPD 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_GET_STATUS 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_SET_INFO 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_OFST 8
+#define MC_CMD_FC_IN_DDR_BANK_B0 0x0 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_B1 0x1 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T0 0x2 /* enum */
+#define MC_CMD_FC_IN_DDR_BANK_T1 0x3 /* enum */
+#define MC_CMD_FC_IN_DDR_NUM_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DDR_SET_SPD msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_SPD_LEN 148
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Flags */
+#define MC_CMD_FC_IN_DDR_FLAGS_OFST 12
+#define MC_CMD_FC_IN_DDR_SET_SPD_ACTIVE 0x1 /* enum */
+/* 128-byte page of serial presence detect data read from module's EEPROM */
+#define MC_CMD_FC_IN_DDR_SPD_OFST 16
+#define MC_CMD_FC_IN_DDR_SPD_LEN 1
+#define MC_CMD_FC_IN_DDR_SPD_NUM 128
+/* Page index of the spd data copied into MC_CMD_FC_IN_DDR_SPD */
+#define MC_CMD_FC_IN_DDR_SPD_PAGE_ID_OFST 144
+
+/* MC_CMD_FC_IN_DDR_SET_INFO msgrequest */
+#define MC_CMD_FC_IN_DDR_SET_INFO_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+/* Size of DDR */
+#define MC_CMD_FC_IN_DDR_SIZE_OFST 12
+
+/* MC_CMD_FC_IN_DDR_GET_STATUS msgrequest */
+#define MC_CMD_FC_IN_DDR_GET_STATUS_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* MC_CMD_FC_IN_DDR_OP_OFST 4 */
+/* Affected bank */
+/* MC_CMD_FC_IN_DDR_BANK_OFST 8 */
+
+/* MC_CMD_FC_IN_TIMESTAMP msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* FC timestamp operation code */
+#define MC_CMD_FC_IN_TIMESTAMP_OP_OFST 4
+/* enum: Read transmit timestamp(s) */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT 0x0
+/* enum: Read snapshot timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT 0x1
+/* enum: Clear all transmit timestamps */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT 0x2
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LEN 28
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_OP_OFST 4
+/* Control filtering of the returned timestamp and sequence number specified
+ * here
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_FILTER_OFST 8
+/* enum: Return most recent timestamp. No filtering */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_LATEST 0x0
+/* enum: Match timestamp against the PTP clock ID, port number and sequence
+ * number specified
+ */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_MATCH 0x1
+/* Clock identity of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LEN 8
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_LO_OFST 12
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_CLOCK_ID_HI_OFST 16
+/* Port number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_PORT_NUM_OFST 20
+/* Sequence number of PTP packet for which timestamp required */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_TRANSMIT_SEQ_NUM_OFST 24
+
+/* MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_READ_SNAPSHOT_OP_OFST 4
+
+/* MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT msgrequest */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_TIMESTAMP_CLEAR_TRANSMIT_OP_OFST 4
+
+/* MC_CMD_FC_IN_SPI msgrequest */
+#define MC_CMD_FC_IN_SPI_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Basic commands for SPI Flash. */
+#define MC_CMD_FC_IN_SPI_OP_OFST 4
+/* enum: SPI Flash read */
+#define MC_CMD_FC_IN_SPI_READ 0x0
+/* enum: SPI Flash write */
+#define MC_CMD_FC_IN_SPI_WRITE 0x1
+/* enum: SPI Flash erase */
+#define MC_CMD_FC_IN_SPI_ERASE 0x2
+
+/* MC_CMD_FC_IN_SPI_READ msgrequest */
+#define MC_CMD_FC_IN_SPI_READ_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_READ_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_READ_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_READ_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_SPI_WRITE msgrequest */
+#define MC_CMD_FC_IN_SPI_WRITE_LENMIN 16
+#define MC_CMD_FC_IN_SPI_WRITE_LENMAX 252
+#define MC_CMD_FC_IN_SPI_WRITE_LEN(num) (12+4*(num))
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_WRITE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_WRITE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_OFST 12
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_LEN 4
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_FC_IN_SPI_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_FC_IN_SPI_ERASE msgrequest */
+#define MC_CMD_FC_IN_SPI_ERASE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_SPI_ERASE_OP_OFST 4
+#define MC_CMD_FC_IN_SPI_ERASE_ADDR_OFST 8
+#define MC_CMD_FC_IN_SPI_ERASE_NUMBYTES_OFST 12
+
+/* MC_CMD_FC_IN_DIAG msgrequest */
+#define MC_CMD_FC_IN_DIAG_LEN 8
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+/* Operation code indicating component type */
+#define MC_CMD_FC_IN_DIAG_OP_OFST 4
+/* enum: Power noise generator. */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE 0x0
+/* enum: DDR soak test component. */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK 0x1
+/* enum: Diagnostics datapath control component. */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL 0x2
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_SUB_OP_OFST 8
+/* enum: Read the configuration (the 32-bit values in each of the clock enable
+ * count and toggle count registers)
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG 0x0
+/* enum: Write a new configuration to the clock enable count and toggle count
+ * registers
+ */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_READ_CONFIG_SUB_OP_OFST 8
+
+/* MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_SUB_OP_OFST 8
+/* The 32-bit value to be written to the toggle count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_TOGGLE_COUNT_OFST 12
+/* The 32-bit value to be written to the clock enable count register */
+#define MC_CMD_FC_IN_DIAG_POWER_NOISE_WRITE_CONFIG_CLKEN_COUNT_OFST 16
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_SUB_OP_OFST 8
+/* enum: Starts DDR soak test on selected banks */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START 0x0
+/* enum: Read status of DDR soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT 0x1
+/* enum: Stop test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP 0x2
+/* enum: Set or clear bit that triggers fake errors. These cause subsequent
+ * tests to fail until the bit is cleared.
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR 0x3
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_START msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_BANK_MASK_OFST 12
+/* Pattern to use in the soak test */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_PATTERN_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ZEROS 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONES 0x1 /* enum */
+/* Either multiple automatic tests until a STOP command is issued, or one
+ * single test
+ */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_TEST_TYPE_OFST 20
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_ONGOING_TEST 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_START_SINGLE_TEST 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_SUB_OP_OFST 8
+/* DDR bank to read status from */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_RESULT_BANK_ID_OFST 12
+#define MC_CMD_FC_DDR_BANK0 0x0 /* enum */
+#define MC_CMD_FC_DDR_BANK1 0x1 /* enum */
+#define MC_CMD_FC_DDR_BANK2 0x2 /* enum */
+#define MC_CMD_FC_DDR_BANK3 0x3 /* enum */
+#define MC_CMD_FC_DDR_AOEMEM_MAX_BANKS 0x4 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_SUB_OP_OFST 8
+/* Mask of DDR banks to be tested */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_STOP_BANK_MASK_OFST 12
+
+/* MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR msgrequest */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_LEN 20
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SUB_OP_OFST 8
+/* Mask of DDR banks to set/clear error flag on */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_BANK_MASK_OFST 12
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_FLAG_ACTION_OFST 16
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_CLEAR 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DDR_SOAK_ERROR_SET 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_LEN 12
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_OP_OFST 4
+/* Sub-opcode describing the operation to be carried out */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SUB_OP_OFST 8
+/* enum: Set a known datapath configuration */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE 0x0
+/* enum: Apply raw config to datapath control registers */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG 0x1
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_LEN 16
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SUB_OP_OFST 8
+/* Datapath configuration identifier */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_MODE_OFST 12
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_PASSTHROUGH 0x0 /* enum */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_SET_MODE_SNAKE 0x1 /* enum */
+
+/* MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG msgrequest */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 24
+/* MC_CMD_FC_IN_CMD_OFST 0 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_OP_OFST 4
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_SUB_OP_OFST 8
+/* Value to write into control register 1 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL1_OFST 12
+/* Value to write into control register 2 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL2_OFST 16
+/* Value to write into control register 3 */
+#define MC_CMD_FC_IN_DIAG_DATAPATH_CTRL_RAW_CONFIG_CONTROL3_OFST 20
+
+/* MC_CMD_FC_OUT msgresponse */
+#define MC_CMD_FC_OUT_LEN 0
+
+/* MC_CMD_FC_OUT_NULL msgresponse */
+#define MC_CMD_FC_OUT_NULL_LEN 0
+
+/* MC_CMD_FC_OUT_READ32 msgresponse */
+#define MC_CMD_FC_OUT_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_READ32_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_TRC_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_READ_LEN 16
+#define MC_CMD_FC_OUT_TRC_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_READ_DATA_NUM 4
+
+/* MC_CMD_FC_OUT_TRC_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_GET_VERSION msgresponse */
+#define MC_CMD_FC_OUT_GET_VERSION_LEN 12
+#define MC_CMD_FC_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_FC_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_FC_OUT_TRC_RX_READ msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_READ_LEN 8
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_LEN 4
+#define MC_CMD_FC_OUT_TRC_RX_READ_DATA_NUM 2
+
+/* MC_CMD_FC_OUT_TRC_RX_WRITE msgresponse */
+#define MC_CMD_FC_OUT_TRC_RX_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_RECONFIGURE msgresponse */
+#define MC_CMD_FC_OUT_MAC_RECONFIGURE_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_MAC_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_MAC_READ_STATUS msgresponse */
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_LEN 4
+#define MC_CMD_FC_OUT_MAC_READ_STATUS_STATUS_OFST 0
+
+/* MC_CMD_FC_OUT_MAC_GET_RX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_RX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_RX_NSTATS
+#define MC_CMD_FC_MAC_RX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_RX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_RX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_RX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_RX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_RX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_RX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_RX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_RX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_RX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_RX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_PKTS_1519_MAX 0x14 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_OVERSIZE_PKTS 0x15 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_JABBERS 0x16 /* enum */
+#define MC_CMD_FC_MAC_RX_STATS_FRAGMENTS 0x17 /* enum */
+#define MC_CMD_FC_MAC_RX_MAC_CONTROL_FRAMES 0x18 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_RX_NSTATS 0x19
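+
+/* Illustrative sketch (example only): each RX statistic is a 64-bit
+ * little-endian counter (LO dword first), so counter number 'stat' (one of
+ * the MC_CMD_FC_MAC_RX_* values above) can be read as below from a response
+ * of MC_CMD_FC_OUT_MAC_GET_RX_STATS_LEN bytes, e.g.
+ * example_fc_rx_stat(resp, MC_CMD_FC_MAC_RX_FRAMES_OK). The example_* name
+ * and the resp buffer are illustration-only.
+ */
+static inline uint64_t
+example_fc_rx_stat(const uint32_t *resp, unsigned int stat)
+{
+        unsigned int idx = (MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_OFST +
+            stat * MC_CMD_FC_OUT_MAC_GET_RX_STATS_STATISTICS_LEN) / 4;
+        uint64_t lo = resp[idx];
+        uint64_t hi = resp[idx + 1];
+
+        return (hi << 32) | lo;
+}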
+
+/* MC_CMD_FC_OUT_MAC_GET_TX_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_TX_NSTATS))+1))>>3)
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_TX_STATS_STATISTICS_NUM MC_CMD_FC_MAC_TX_NSTATS
+#define MC_CMD_FC_MAC_TX_STATS_OCTETS 0x0 /* enum */
+#define MC_CMD_FC_MAC_TX_OCTETS_OK 0x1 /* enum */
+#define MC_CMD_FC_MAC_TX_ALIGNMENT_ERRORS 0x2 /* enum */
+#define MC_CMD_FC_MAC_TX_PAUSE_MAC_CTRL_FRAMES 0x3 /* enum */
+#define MC_CMD_FC_MAC_TX_FRAMES_OK 0x4 /* enum */
+#define MC_CMD_FC_MAC_TX_CRC_ERRORS 0x5 /* enum */
+#define MC_CMD_FC_MAC_TX_VLAN_OK 0x6 /* enum */
+#define MC_CMD_FC_MAC_TX_ERRORS 0x7 /* enum */
+#define MC_CMD_FC_MAC_TX_UCAST_PKTS 0x8 /* enum */
+#define MC_CMD_FC_MAC_TX_MULTICAST_PKTS 0x9 /* enum */
+#define MC_CMD_FC_MAC_TX_BROADCAST_PKTS 0xa /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_DROP_EVENTS 0xb /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS 0xc /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_UNDERSIZE_PKTS 0xd /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_64 0xe /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_65_127 0xf /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_128_255 0x10 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_256_511 0x11 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_512_1023 0x12 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1024_1518 0x13 /* enum */
+#define MC_CMD_FC_MAC_TX_STATS_PKTS_1519_TX_MTU 0x14 /* enum */
+#define MC_CMD_FC_MAC_TX_MAC_CONTROL_FRAMES 0x15 /* enum */
+/* enum: (Last entry) */
+#define MC_CMD_FC_MAC_TX_NSTATS 0x16
+
+/* MC_CMD_FC_OUT_MAC_GET_STATS msgresponse */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_LEN ((((0-1+(64*MC_CMD_FC_MAC_NSTATS_PER_BLOCK))+1))>>3)
+/* MAC Statistics */
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LEN 8
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_LO_OFST 0
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_HI_OFST 4
+#define MC_CMD_FC_OUT_MAC_GET_STATS_STATISTICS_NUM MC_CMD_FC_MAC_NSTATS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_MAC msgresponse */
+#define MC_CMD_FC_OUT_MAC_LEN 0
+
+/* MC_CMD_FC_OUT_SFP msgresponse */
+#define MC_CMD_FC_OUT_SFP_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_START msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_START_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_TEST_POLL msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_LEN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH 8
+/* enum: Test not yet initiated */
+#define MC_CMD_FC_OP_DDR_TEST_NONE 0x0
+/* enum: Test is in progress */
+#define MC_CMD_FC_OP_DDR_TEST_INPROGRESS 0x1
+/* enum: Test completed within the specified time */
+#define MC_CMD_FC_OP_DDR_TEST_SUCCESS 0x2
+/* enum: Test did not complete in specified time */
+#define MC_CMD_FC_OP_DDR_TEST_TIMER_EXPIRED 0x3
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_LBN 11
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_LBN 9
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_LBN 8
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_PRESENT_B1_WIDTH 1
+/* Test result from FPGA */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_RESULT_OFST 4
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_LBN 31
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_LBN 30
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_T1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_LBN 29
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B0_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_LBN 28
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_FPGA_SUPPORTS_B1_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_LBN 15
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_LBN 10
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_T1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_LBN 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B0_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_LBN 0
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_B1_WIDTH 5
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_COMPLETE 0x0 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_FAIL 0x1 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_TEST_PASS 0x2 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_FAIL 0x3 /* enum */
+#define MC_CMD_FC_OUT_DDR_TEST_POLL_CAL_SUCCESS 0x4 /* enum */
+
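Bit fields within a dword here (and throughout this header) are described by an LBN (lowest bit number) / WIDTH pair rather than by a C struct. A minimal extraction helper, written only to illustrate the convention; the function name is hypothetical and not part of this patch.

#include <stdint.h>

/* Hypothetical helper: extract an LBN/WIDTH bit field from a 32-bit word. */
static inline uint32_t
mcdi_field(uint32_t dword, unsigned int lbn, unsigned int width)
{
	return (dword >> lbn) & ((1u << width) - 1u);
}

/* e.g. the 8-bit DDR test status code from the STATUS dword:
 * code = mcdi_field(status, MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_LBN,
 *                   MC_CMD_FC_OUT_DDR_TEST_POLL_CODE_WIDTH);
 */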
+/* MC_CMD_FC_OUT_DDR_TEST msgresponse */
+#define MC_CMD_FC_OUT_DDR_TEST_LEN 0
+
+/* MC_CMD_FC_OUT_GET_ASSERT msgresponse */
+#define MC_CMD_FC_OUT_GET_ASSERT_LEN 144
+/* Assertion status flag. */
+#define MC_CMD_FC_OUT_GET_ASSERT_GLOBAL_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_LBN 8
+#define MC_CMD_FC_OUT_GET_ASSERT_STATE_WIDTH 8
+/* enum: No crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_CLEAR 0x0
+/* enum: New crash data available */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NEW 0x1
+/* enum: Crash data has been sent */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_STATE_NOTIFIED 0x2
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_LBN 0
+#define MC_CMD_FC_OUT_GET_ASSERT_TYPE_WIDTH 8
+/* enum: No crash has been recorded. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_NONE 0x0
+/* enum: Crash due to exception. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_EXCEPTION 0x1
+/* enum: Crash due to assertion. */
+#define MC_CMD_FC_GET_ASSERT_FLAGS_TYPE_ASSERTION 0x2
+/* Failing PC value */
+#define MC_CMD_FC_OUT_GET_ASSERT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_OFST 8
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_LEN 4
+#define MC_CMD_FC_OUT_GET_ASSERT_GP_REGS_OFFS_NUM 31
+/* Exception Type */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_TYPE_OFFS_OFST 132
+/* Instruction at which exception occurred */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_PC_ADDR_OFFS_OFST 136
+/* Bad address that triggered an address-based exception */
+#define MC_CMD_FC_OUT_GET_ASSERT_EXCEPTION_BAD_ADDR_OFFS_OFST 140
+
+/* MC_CMD_FC_OUT_FPGA_BUILD msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_FPGA_TYPE_WIDTH 8
+#define MC_CMD_FC_FPGA_TYPE_A7 0xa7 /* enum */
+#define MC_CMD_FC_FPGA_TYPE_A5 0xa5 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED1_WIDTH 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM3_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM4_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_B1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_SODIMM_T1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED2_WIDTH 2
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_CRC_APPEND_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 /* enum */
+#define MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED3_WIDTH 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LEN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_LO_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_RESERVED4_HI_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_BUILD_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_LBN 31
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_PMA_PASSTHROUGH_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_QDR_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_LBN 27
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DDR3_ECC_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_LBN 26
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_LBN 25
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_LBN 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_TO_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_LBN 23
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_T0_DDR3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_LBN 22
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_LBN 21
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DISCRETE1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_LBN 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM2_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_LBN 19
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SODIMM1_RLDRAM_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_LBN 18
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_LBN 17
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_7_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_10G 0x0 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_3_SPEED_40G 0x1 /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_LBN 15
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP7_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_LBN 14
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP6_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_LBN 13
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP5_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_LBN 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP4_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_LBN 11
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_LBN 10
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_LBN 9
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_LBN 8
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_LBN 7
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC3_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_LBN 6
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC2_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_LBN 5
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_LBN 4
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_FPGA_TYPE_WIDTH 4
+#define MC_CMD_FC_FPGA_V2_TYPE_A3 0x0 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A4 0x1 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A5 0x2 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_A7 0x3 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D3 0x8 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D4 0x9 /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D5 0xa /* enum */
+#define MC_CMD_FC_FPGA_V2_TYPE_D7 0xb /* enum */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_HI_OFST 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MINOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MAJOR_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_VERSION_LO_OFST 20
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_BUILD_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_LBN 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_DEPLOYMENT_VERSION_MICRO_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_BUILD_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_LBN 27
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_LBN 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_NIC1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_LBN 29
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP0_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_SFP1_DEF_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_RESERVED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_BUILD_FLAG_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_OFST 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_MEMORY_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_OFST 20
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_INSTANCE_SIZE_COUNT_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_FPGA_SERVICES_V2 msgresponse */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_LEN 32
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_INFO_OFST 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_LBN 31
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_APPLICATION_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_LBN 30
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IS_LICENSED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_COMPONENT_ID_WIDTH 14
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_NUM_WIDTH 4
+/* Build timestamp (seconds since epoch) */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_TIMESTAMP_OFST 4
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PARAMETERS_OFST 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_PTP_ENABLED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_LBN 8
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_FC_FLASH_BOOTED_WIDTH 1
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_IDENTIFIER_OFST 12
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_CHANGESET_WIDTH 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_LBN 16
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_BUILD_FLAG_WIDTH 1
+/* MC_CMD_FC_FPGA_BUILD_FLAG_INTERNAL 0x0 */
+/* MC_CMD_FC_FPGA_BUILD_FLAG_RELEASE 0x1 */
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_LO_OFST 24
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HI_OFST 28
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_LBN 0
+#define MC_CMD_FC_OUT_FPGA_SERVICES_V2_REVISION_HIGH_WIDTH 16
+
+/* MC_CMD_FC_OUT_BSP_VERSION msgresponse */
+#define MC_CMD_FC_OUT_BSP_VERSION_LEN 4
+/* Qsys system ID */
+#define MC_CMD_FC_OUT_BSP_VERSION_SYSID_OFST 0
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_LBN 12
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MAJOR_WIDTH 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_LBN 4
+#define MC_CMD_FC_OUT_BSP_VERSION_VERSION_MINOR_WIDTH 8
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_LBN 0
+#define MC_CMD_FC_OUT_BSP_VERSION_BUILD_NUM_WIDTH 4
+
+/* MC_CMD_FC_OUT_READ_MAP_COUNT msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_LEN 4
+/* Number of maps */
+#define MC_CMD_FC_OUT_READ_MAP_COUNT_NUM_MAPS_OFST 0
+
+/* MC_CMD_FC_OUT_READ_MAP_INDEX msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN 164
+/* Index of the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_INDEX_OFST 0
+/* Options for the map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_OPTIONS_OFST 4
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_8 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_16 0x1 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_32 0x2 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_64 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ALIGN_MASK 0x3 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_FC 0x4 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PATH_MEM 0x8 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_READ 0x10 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_PERM_WRITE 0x20 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_FREE 0x0 /* enum */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_LICENSED 0x40 /* enum */
+/* Address of start of map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_LO_OFST 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_ADDRESS_HI_OFST 12
+/* Length of address map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_LO_OFST 16
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LEN_HI_OFST 20
+/* Component information field */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_COMP_INFO_OFST 24
+/* License expiry date for map */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LEN 8
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_LO_OFST 28
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_LICENSE_DATE_HI_OFST 32
+/* Name of the component */
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_OFST 36
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_LEN 1
+#define MC_CMD_FC_OUT_READ_MAP_INDEX_NAME_NUM 128
+
+/* MC_CMD_FC_OUT_READ_MAP msgresponse */
+#define MC_CMD_FC_OUT_READ_MAP_LEN 0
+
+/* MC_CMD_FC_OUT_CAPABILITIES msgresponse */
+#define MC_CMD_FC_OUT_CAPABILITIES_LEN 8
+/* Number of internal ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_INTERNAL_OFST 0
+/* Number of external ports */
+#define MC_CMD_FC_OUT_CAPABILITIES_EXTERNAL_OFST 4
+
+/* MC_CMD_FC_OUT_GLOBAL_FLAGS msgresponse */
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_LEN 4
+#define MC_CMD_FC_OUT_GLOBAL_FLAGS_FLAGS_OFST 0
+
+/* MC_CMD_FC_OUT_IO_REL msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_LEN 0
+
+/* MC_CMD_FC_OUT_IO_REL_GET_ADDR msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_LEN 8
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_HI_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_GET_ADDR_ADDR_LO_OFST 4
+
+/* MC_CMD_FC_OUT_IO_REL_READ32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMIN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_LENMAX 252
+#define MC_CMD_FC_OUT_IO_REL_READ32_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_MAXNUM 63
+
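Variable-length responses such as this one follow the LENMIN/LENMAX/LEN(num) convention: LEN(num) gives the payload size in bytes for num array elements, so the element count can be recovered from the actual response length. A small sketch under that assumption; the helper name is illustrative, not part of the patch.

#include <stddef.h>

/* Illustrative: recover the number of 32-bit words carried by an
 * MC_CMD_FC_OUT_IO_REL_READ32 response of resp_len bytes. Valid lengths lie
 * between ..._LENMIN and ..._LENMAX, i.e. 1..63 words.
 */
static unsigned int
io_rel_read32_nwords(size_t resp_len)
{
	return (unsigned int)((resp_len -
	    MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_OFST) /
	    MC_CMD_FC_OUT_IO_REL_READ32_BUFFER_LEN);
}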
+/* MC_CMD_FC_OUT_IO_REL_WRITE32 msgresponse */
+#define MC_CMD_FC_OUT_IO_REL_WRITE32_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_PHY msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LEN 48
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_0_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_VOD_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_1STPOSTTAP_WIDTH 16
+/* Transceiver Transmit settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_SETTINGS_1_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_PRETAP_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_TX_PREEMP_2NDPOSTTAP_WIDTH 16
+/* Transceiver Receive settings */
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_SETTINGS_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_DC_GAIN_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_TRC_RX_EQ_CONTROL_WIDTH 16
+/* Rx eye opening */
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_WIDTH_WIDTH 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_LBN 16
+#define MC_CMD_FC_OUT_UHLINK_PHY_RX_EYE_HEIGHT_WIDTH 16
+/* PCS status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PCS_STATUS_OFST 16
+/* Link status word */
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WORD_OFST 20
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_STATE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_LINK_CONFIGURED_WIDTH 1
+/* Current SFP parameters applied */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_OFST 24
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PARAMS_LEN 20
+/* Link speed in Mbps: 100, 1000 or 10000 */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_SPEED_OFST 24
+/* Length of copper cable - zero when not relevant */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_COPPER_LEN_OFST 28
+/* True if a dual speed SFP+ module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_DUAL_SPEED_OFST 32
+/* True if an SFP Module is present (other fields valid when true) */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_PRESENT_OFST 36
+/* The type of the SFP+ Module */
+#define MC_CMD_FC_OUT_UHLINK_PHY_SFP_TYPE_OFST 40
+/* PHY config flags */
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_OFST 44
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_LBN 0
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_DFE_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_LBN 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_AEQ_WIDTH 1
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_LBN 2
+#define MC_CMD_FC_OUT_UHLINK_PHY_PHY_CFG_RX_TUNING_WIDTH 1
+
+/* MC_CMD_FC_OUT_UHLINK_MAC msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_MAC_LEN 20
+/* MAC configuration applied */
+#define MC_CMD_FC_OUT_UHLINK_MAC_CONFIG_OFST 0
+/* MTU size */
+#define MC_CMD_FC_OUT_UHLINK_MAC_MTU_OFST 4
+/* IF Mode status */
+#define MC_CMD_FC_OUT_UHLINK_MAC_IF_STATUS_OFST 8
+/* MAC address configured */
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_LO_OFST 12
+#define MC_CMD_FC_OUT_UHLINK_MAC_ADDR_HI_OFST 16
+
+/* MC_CMD_FC_OUT_UHLINK_RX_EYE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_LEN ((((0-1+(32*MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK))+1))>>3)
+/* Rx Eye measurements */
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_OFST 0
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_RX_EYE_RX_EYE_NUM MC_CMD_FC_UHLINK_RX_EYE_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_DUMP_RX_EYE_PLOT_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_LEN ((((32-1+(64*MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK))+1))>>3)
+/* Has the eye plot dump completed and is the returned data valid? */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_VALID_OFST 0
+/* Rx Eye binary plot */
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LEN 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_LO_OFST 4
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_HI_OFST 8
+#define MC_CMD_FC_OUT_UHLINK_READ_RX_EYE_PLOT_ROWS_NUM MC_CMD_FC_UHLINK_RX_EYE_PLOT_ROWS_PER_BLOCK
+
+/* MC_CMD_FC_OUT_UHLINK_RX_TUNE msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_RX_TUNE_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_SET_LEN 0
+
+/* MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_LEN 4
+#define MC_CMD_FC_OUT_UHLINK_LOOPBACK_GET_STATE_OFST 0
+
+/* MC_CMD_FC_OUT_UHLINK msgresponse */
+#define MC_CMD_FC_OUT_UHLINK_LEN 0
+
+/* MC_CMD_FC_OUT_SET_LINK msgresponse */
+#define MC_CMD_FC_OUT_SET_LINK_LEN 0
+
+/* MC_CMD_FC_OUT_LICENSE msgresponse */
+#define MC_CMD_FC_OUT_LICENSE_LEN 12
+/* Count of valid keys */
+#define MC_CMD_FC_OUT_LICENSE_VALID_KEYS_OFST 0
+/* Count of invalid keys */
+#define MC_CMD_FC_OUT_LICENSE_INVALID_KEYS_OFST 4
+/* Count of blacklisted keys */
+#define MC_CMD_FC_OUT_LICENSE_BLACKLISTED_KEYS_OFST 8
+
+/* MC_CMD_FC_OUT_STARTUP msgresponse */
+#define MC_CMD_FC_OUT_STARTUP_LEN 4
+/* Capabilities of the FPGA/FC */
+#define MC_CMD_FC_OUT_STARTUP_CAPABILITIES_OFST 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_LBN 0
+#define MC_CMD_FC_OUT_STARTUP_CAN_ACCESS_FLASH_WIDTH 1
+
+/* MC_CMD_FC_OUT_DMA_READ msgresponse */
+#define MC_CMD_FC_OUT_DMA_READ_LENMIN 1
+#define MC_CMD_FC_OUT_DMA_READ_LENMAX 252
+#define MC_CMD_FC_OUT_DMA_READ_LEN(num) (0+1*(num))
+/* The data read */
+#define MC_CMD_FC_OUT_DMA_READ_DATA_OFST 0
+#define MC_CMD_FC_OUT_DMA_READ_DATA_LEN 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MINNUM 1
+#define MC_CMD_FC_OUT_DMA_READ_DATA_MAXNUM 252
+
+/* MC_CMD_FC_OUT_TIMED_READ_SET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_LEN 4
+/* Timer handle */
+#define MC_CMD_FC_OUT_TIMED_READ_SET_FC_HANDLE_OFST 0
+
+/* MC_CMD_FC_OUT_TIMED_READ_GET msgresponse */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_LEN 52
+/* Host supplied handle (unique) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_HANDLE_OFST 0
+/* Host address into which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_LO_OFST 4
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_DMA_ADDRESS_HI_OFST 8
+/* AOE address from which to transfer data */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_LO_OFST 12
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_ADDRESS_HI_OFST 16
+/* Length of AOE transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_AOE_LENGTH_OFST 20
+/* Length of host transfer (total) */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_HOST_LENGTH_OFST 24
+/* See FLAGS entry for MC_CMD_FC_IN_TIMED_READ_SET */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_FLAGS_OFST 28
+#define MC_CMD_FC_OUT_TIMED_READ_GET_PERIOD_OFST 32
+/* When active, start read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_LO_OFST 36
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_START_HI_OFST 40
+/* When active, end read time */
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LEN 8
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_LO_OFST 44
+#define MC_CMD_FC_OUT_TIMED_READ_GET_CLOCK_END_HI_OFST 48
+
+/* MC_CMD_FC_OUT_LOG_ADDR_RANGE msgresponse */
+#define MC_CMD_FC_OUT_LOG_ADDR_RANGE_LEN 0
+
+/* MC_CMD_FC_OUT_LOG msgresponse */
+#define MC_CMD_FC_OUT_LOG_LEN 0
+
+/* MC_CMD_FC_OUT_CLOCK_GET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_LEN 24
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_CLOCK_ID_OFST 0
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LEN 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_LO_OFST 4
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_SECONDS_HI_OFST 8
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_NANOSECONDS_OFST 12
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_RANGE_OFST 16
+#define MC_CMD_FC_OUT_CLOCK_GET_TIME_PRECISION_OFST 20
+
+/* MC_CMD_FC_OUT_CLOCK_SET_TIME msgresponse */
+#define MC_CMD_FC_OUT_CLOCK_SET_TIME_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_SPD msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_SPD_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_SET_INFO msgresponse */
+#define MC_CMD_FC_OUT_DDR_SET_INFO_LEN 0
+
+/* MC_CMD_FC_OUT_DDR_GET_STATUS msgresponse */
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_LEN 4
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_FLAGS_OFST 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_LBN 0
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_READY_WIDTH 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_LBN 1
+#define MC_CMD_FC_OUT_DDR_GET_STATUS_CALIBRATED_WIDTH 1
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_TRANSMIT_NANOSECONDS_OFST 4
+
+/* MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT msgresponse */
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMIN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LENMAX 248
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_LEN(num) (0+8*(num))
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_SECONDS_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_NANOSECONDS_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LEN 8
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_LO_OFST 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_HI_OFST 4
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MINNUM 0
+#define MC_CMD_FC_OUT_TIMESTAMP_READ_SNAPSHOT_TIMESTAMP_MAXNUM 31
+
+/* MC_CMD_FC_OUT_SPI_READ msgresponse */
+#define MC_CMD_FC_OUT_SPI_READ_LENMIN 4
+#define MC_CMD_FC_OUT_SPI_READ_LENMAX 252
+#define MC_CMD_FC_OUT_SPI_READ_LEN(num) (0+4*(num))
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_OFST 0
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_LEN 4
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MINNUM 1
+#define MC_CMD_FC_OUT_SPI_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_FC_OUT_SPI_WRITE msgresponse */
+#define MC_CMD_FC_OUT_SPI_WRITE_LEN 0
+
+/* MC_CMD_FC_OUT_SPI_ERASE msgresponse */
+#define MC_CMD_FC_OUT_SPI_ERASE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_LEN 8
+/* The 32-bit value read from the toggle count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_TOGGLE_COUNT_OFST 0
+/* The 32-bit value read from the clock enable count register */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_READ_CONFIG_CLKEN_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_POWER_NOISE_WRITE_CONFIG_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_START msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_START_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_LEN 8
+/* DDR soak test status word; bits [4:0] are relevant. */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_STATUS_OFST 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_LBN 0
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PASSED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_LBN 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_FAILED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_LBN 2
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_COMPLETED_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_LBN 3
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_TIMEOUT_WIDTH 1
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_LBN 4
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_PNF_WIDTH 1
+/* DDR soak test error count */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_RESULT_ERR_COUNT_OFST 4
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_STOP_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DDR_SOAK_ERROR_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_SET_MODE_LEN 0
+
+/* MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG msgresponse */
+#define MC_CMD_FC_OUT_DIAG_DATAPATH_CTRL_RAW_CONFIG_LEN 0
+
+
+/***********************************/
+/* MC_CMD_AOE
+ * AOE operations on MC
+ */
+#define MC_CMD_AOE 0xa
+
+/* MC_CMD_AOE_IN msgrequest */
+#define MC_CMD_AOE_IN_LEN 4
+#define MC_CMD_AOE_IN_OP_HDR_OFST 0
+#define MC_CMD_AOE_IN_OP_LBN 0
+#define MC_CMD_AOE_IN_OP_WIDTH 8
+/* enum: FPGA and CPLD information */
+#define MC_CMD_AOE_OP_INFO 0x1
+/* enum: Currents and voltages read from MCP3424s; DEBUG */
+#define MC_CMD_AOE_OP_CURRENTS 0x2
+/* enum: Temperatures at locations around the PCB; DEBUG */
+#define MC_CMD_AOE_OP_TEMPERATURES 0x3
+/* enum: Set CPLD to idle */
+#define MC_CMD_AOE_OP_CPLD_IDLE 0x4
+/* enum: Read from CPLD register */
+#define MC_CMD_AOE_OP_CPLD_READ 0x5
+/* enum: Write to CPLD register */
+#define MC_CMD_AOE_OP_CPLD_WRITE 0x6
+/* enum: Execute CPLD instruction */
+#define MC_CMD_AOE_OP_CPLD_INSTRUCTION 0x7
+/* enum: Reprogram the CPLD on the AOE device */
+#define MC_CMD_AOE_OP_CPLD_REPROGRAM 0x8
+/* enum: AOE power control */
+#define MC_CMD_AOE_OP_POWER 0x9
+/* enum: AOE image loading */
+#define MC_CMD_AOE_OP_LOAD 0xa
+/* enum: Fan monitoring */
+#define MC_CMD_AOE_OP_FAN_CONTROL 0xb
+/* enum: Fan failures since last reset */
+#define MC_CMD_AOE_OP_FAN_FAILURES 0xc
+/* enum: Get generic AOE MAC statistics */
+#define MC_CMD_AOE_OP_MAC_STATS 0xd
+/* enum: Retrieve PHY specific information */
+#define MC_CMD_AOE_OP_GET_PHY_MEDIA_INFO 0xe
+/* enum: Write a number of JTAG primitive commands; the response returns the data */
+#define MC_CMD_AOE_OP_JTAG_WRITE 0xf
+/* enum: Control access to the FPGA via the Siena JTAG Chain */
+#define MC_CMD_AOE_OP_FPGA_ACCESS 0x10
+/* enum: Set the MTU offset between Siena and AOE MACs */
+#define MC_CMD_AOE_OP_SET_MTU_OFFSET 0x11
+/* enum: How link state is handled */
+#define MC_CMD_AOE_OP_LINK_STATE 0x12
+/* enum: How Siena MAC statistics are reported (deprecated - use
+ * MC_CMD_AOE_OP_ASIC_STATS)
+ */
+#define MC_CMD_AOE_OP_SIENA_STATS 0x13
+/* enum: How native ASIC MAC statistics are reported - replaces the deprecated
+ * command MC_CMD_AOE_OP_SIENA_STATS
+ */
+#define MC_CMD_AOE_OP_ASIC_STATS 0x13
+/* enum: DDR memory information */
+#define MC_CMD_AOE_OP_DDR 0x14
+/* enum: FC control */
+#define MC_CMD_AOE_OP_FC 0x15
+/* enum: DDR ECC status reads */
+#define MC_CMD_AOE_OP_DDR_ECC_STATUS 0x16
+/* enum: Commands for MC-SPI Master emulation */
+#define MC_CMD_AOE_OP_MC_SPI_MASTER 0x17
+/* enum: Commands for FC boot control */
+#define MC_CMD_AOE_OP_FC_BOOT 0x18
+
+/* MC_CMD_AOE_OUT msgresponse */
+#define MC_CMD_AOE_OUT_LEN 0
+
+/* MC_CMD_AOE_IN_INFO msgrequest */
+#define MC_CMD_AOE_IN_INFO_LEN 4
+#define MC_CMD_AOE_IN_CMD_OFST 0
+
+/* MC_CMD_AOE_IN_CURRENTS msgrequest */
+#define MC_CMD_AOE_IN_CURRENTS_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_TEMPERATURES msgrequest */
+#define MC_CMD_AOE_IN_TEMPERATURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_IDLE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_IDLE_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_CPLD_READ msgrequest */
+#define MC_CMD_AOE_IN_CPLD_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_READ_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_READ_WIDTH_OFST 8
+
+/* MC_CMD_AOE_IN_CPLD_WRITE msgrequest */
+#define MC_CMD_AOE_IN_CPLD_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_WRITE_REGISTER_OFST 4
+#define MC_CMD_AOE_IN_CPLD_WRITE_WIDTH_OFST 8
+#define MC_CMD_AOE_IN_CPLD_WRITE_VALUE_OFST 12
+
+/* MC_CMD_AOE_IN_CPLD_INSTRUCTION msgrequest */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_INSTRUCTION_INSTRUCTION_OFST 4
+
+/* MC_CMD_AOE_IN_CPLD_REPROGRAM msgrequest */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_OP_OFST 4
+/* enum: Reprogram CPLD, poll for completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM 0x1
+/* enum: Reprogram CPLD, send event on completion */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_REPROGRAM_EVENT 0x3
+/* enum: Get status of reprogramming operation */
+#define MC_CMD_AOE_IN_CPLD_REPROGRAM_STATUS 0x4
+
+/* MC_CMD_AOE_IN_POWER msgrequest */
+#define MC_CMD_AOE_IN_POWER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Turn on or off AOE power */
+#define MC_CMD_AOE_IN_POWER_OP_OFST 4
+/* enum: Turn off FPGA power */
+#define MC_CMD_AOE_IN_POWER_OFF 0x0
+/* enum: Turn on FPGA power */
+#define MC_CMD_AOE_IN_POWER_ON 0x1
+/* enum: Clear peak power measurement */
+#define MC_CMD_AOE_IN_POWER_CLEAR 0x2
+/* enum: Show current power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_CURRENT 0x3
+/* enum: Show peak power in sensors output */
+#define MC_CMD_AOE_IN_POWER_SHOW_PEAK 0x4
+/* enum: Show last measured DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_LAST 0x5
+/* enum: Show peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_PEAK 0x6
+/* enum: Clear peak DDR current */
+#define MC_CMD_AOE_IN_POWER_DDR_CLEAR 0x7
+
+/* MC_CMD_AOE_IN_LOAD msgrequest */
+#define MC_CMD_AOE_IN_LOAD_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Image to load (0 - main or 1 - diagnostic) in the normal sequence */
+#define MC_CMD_AOE_IN_LOAD_IMAGE_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_CONTROL msgrequest */
+#define MC_CMD_AOE_IN_FAN_CONTROL_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* If non-zero, report measured fan RPM rather than nominal */
+#define MC_CMD_AOE_IN_FAN_CONTROL_REAL_RPM_OFST 4
+
+/* MC_CMD_AOE_IN_FAN_FAILURES msgrequest */
+#define MC_CMD_AOE_IN_FAN_FAILURES_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_MAC_STATS msgrequest */
+#define MC_CMD_AOE_IN_MAC_STATS_LEN 24
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_MAC_STATS_PORT_OFST 4
+/* Host memory address for statistics */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LEN 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_LO_OFST 8
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_ADDR_HI_OFST 12
+#define MC_CMD_AOE_IN_MAC_STATS_CMD_OFST 16
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LBN 0
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_LBN 1
+#define MC_CMD_AOE_IN_MAC_STATS_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_AOE_IN_MAC_STATS_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN 16
+#define MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_WIDTH 16
+/* Length of DMA data (optional) */
+#define MC_CMD_AOE_IN_MAC_STATS_DMA_LEN_OFST 20
+
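The CMD dword at offset 16 packs several single-bit control flags together with a 16-bit period in milliseconds. A minimal sketch of composing that dword for a periodic DMA request; the particular flag combination and the 100 ms period are illustrative choices, not a documented default.

#include <stdint.h>

/* Illustrative: build a MAC_STATS CMD dword requesting periodic DMA of the
 * statistics every 100 ms.
 */
static uint32_t
aoe_mac_stats_cmd(void)
{
	uint32_t cmd = 0;

	cmd |= 1u << MC_CMD_AOE_IN_MAC_STATS_DMA_LBN;
	cmd |= 1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_CHANGE_LBN;
	cmd |= 1u << MC_CMD_AOE_IN_MAC_STATS_PERIODIC_ENABLE_LBN;
	cmd |= 100u << MC_CMD_AOE_IN_MAC_STATS_PERIOD_MS_LBN;
	return cmd;
}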
+/* MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO msgrequest */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port */
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PORT_OFST 4
+#define MC_CMD_AOE_IN_GET_PHY_MEDIA_INFO_PAGE_OFST 8
+
+/* MC_CMD_AOE_IN_JTAG_WRITE msgrequest */
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_IN_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_IN_JTAG_WRITE_LEN(num) (8+4*(num))
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATALEN_OFST 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_IN_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_IN_FPGA_ACCESS msgrequest */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Enable or disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_OP_OFST 4
+/* enum: Enable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_ENABLE 0x1
+/* enum: Disable access */
+#define MC_CMD_AOE_IN_FPGA_ACCESS_DISABLE 0x2
+
+/* MC_CMD_AOE_IN_SET_MTU_OFFSET msgrequest */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* AOE port; gives a port number unless set to ALL_EXTERNAL or ALL_INTERNAL */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_PORT_OFST 4
+/* enum: Apply to all external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_EXTERNAL 0x8000
+/* enum: Apply to all internal ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_ALL_INTERNAL 0x4000
+/* The MTU offset to be applied to the external ports */
+#define MC_CMD_AOE_IN_SET_MTU_OFFSET_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_LINK_STATE msgrequest */
+#define MC_CMD_AOE_IN_LINK_STATE_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_LINK_STATE_MODE_OFST 4
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_LBN 0
+#define MC_CMD_AOE_IN_LINK_STATE_CONFIG_MODE_WIDTH 8
+/* enum: AOE and associated external port */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_SEPARATE 0x0
+/* enum: AOE and OR of all external ports */
+#define MC_CMD_AOE_IN_LINK_STATE_SIMPLE_COMBINED 0x1
+/* enum: Individual ports */
+#define MC_CMD_AOE_IN_LINK_STATE_DIAGNOSTIC 0x2
+/* enum: Configure link state mode on given AOE port */
+#define MC_CMD_AOE_IN_LINK_STATE_CUSTOM 0x3
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_LBN 8
+#define MC_CMD_AOE_IN_LINK_STATE_OPERATION_WIDTH 8
+/* enum: No-op */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_NONE 0x0
+/* enum: logical OR of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_OR 0x1
+/* enum: logical AND of all SFP ports link status */
+#define MC_CMD_AOE_IN_LINK_STATE_OP_AND 0x2
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_LBN 16
+#define MC_CMD_AOE_IN_LINK_STATE_SFP_MASK_WIDTH 16
+
+/* MC_CMD_AOE_IN_SIENA_STATS msgrequest */
+#define MC_CMD_AOE_IN_SIENA_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_SIENA_STATS_MODE_OFST 4
+/* enum: Statistics from Siena (default) */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_SIENA 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_SIENA_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_ASIC_STATS msgrequest */
+#define MC_CMD_AOE_IN_ASIC_STATS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* How MAC statistics are reported */
+#define MC_CMD_AOE_IN_ASIC_STATS_MODE_OFST 4
+/* enum: Statistics from the ASIC (default) */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_ASIC 0x0
+/* enum: Statistics from AOE external ports */
+#define MC_CMD_AOE_IN_ASIC_STATS_STATS_AOE 0x1
+
+/* MC_CMD_AOE_IN_DDR msgrequest */
+#define MC_CMD_AOE_IN_DDR_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+/* Page index of SPD data */
+#define MC_CMD_AOE_IN_DDR_SPD_PAGE_ID_OFST 8
+
+/* MC_CMD_AOE_IN_FC msgrequest */
+#define MC_CMD_AOE_IN_FC_LEN 4
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+
+/* MC_CMD_AOE_IN_DDR_ECC_STATUS msgrequest */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_DDR_ECC_STATUS_BANK_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_FC/MC_CMD_FC_IN_DDR/MC_CMD_FC_IN_DDR_BANK */
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* Basic commands for MC SPI Master emulation. */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_OP_OFST 4
+/* enum: MC SPI read */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ 0x0
+/* enum: MC SPI write */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE 0x1
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_READ msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_LEN 12
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_READ_OFFSET_OFST 8
+
+/* MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE msgrequest */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_LEN 16
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OP_OFST 4
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_OFFSET_OFST 8
+#define MC_CMD_AOE_IN_MC_SPI_MASTER_WRITE_DATA_OFST 12
+
+/* MC_CMD_AOE_IN_FC_BOOT msgrequest */
+#define MC_CMD_AOE_IN_FC_BOOT_LEN 8
+/* MC_CMD_AOE_IN_CMD_OFST 0 */
+/* FC boot control flags */
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_OFST 4
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_LBN 0
+#define MC_CMD_AOE_IN_FC_BOOT_CONTROL_BOOT_ENABLE_WIDTH 1
+
+/* MC_CMD_AOE_OUT_INFO msgresponse */
+#define MC_CMD_AOE_OUT_INFO_LEN 44
+/* JTAG IDCODE of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_IDCODE_OFST 0
+/* Version of CPLD */
+#define MC_CMD_AOE_OUT_INFO_CPLD_VERSION_OFST 4
+/* JTAG IDCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IDCODE_OFST 8
+/* JTAG USERCODE of FPGA */
+#define MC_CMD_AOE_OUT_INFO_FPGA_VERSION_OFST 12
+/* FPGA type - read from CPLD straps */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_OFST 16
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A5_C2 0x1 /* enum */
+#define MC_CMD_AOE_OUT_INFO_FPGA_TYPE_A7_C2 0x2 /* enum */
+/* FPGA state (debug) */
+#define MC_CMD_AOE_OUT_INFO_FPGA_STATE_OFST 20
+/* FPGA image - partition from which loaded */
+#define MC_CMD_AOE_OUT_INFO_FPGA_IMAGE_OFST 24
+/* FC state */
+#define MC_CMD_AOE_OUT_INFO_FC_STATE_OFST 28
+/* enum: Set if watchdog working */
+#define MC_CMD_AOE_OUT_INFO_WATCHDOG 0x1
+/* enum: Set if MC-FC communications working */
+#define MC_CMD_AOE_OUT_INFO_COMMS 0x2
+/* Miscellaneous information flags */
+#define MC_CMD_AOE_OUT_INFO_FLAGS_OFST 32
+/* enum: Power to FPGA supplied by PEG connector, not PCIe bus */
+#define MC_CMD_AOE_OUT_INFO_PEG_POWER 0x1
+/* enum: CPLD apparently good */
+#define MC_CMD_AOE_OUT_INFO_CPLD_GOOD 0x2
+/* enum: FPGA working normally */
+#define MC_CMD_AOE_OUT_INFO_FPGA_GOOD 0x4
+/* enum: FPGA is powered */
+#define MC_CMD_AOE_OUT_INFO_FPGA_POWER 0x8
+/* enum: Board has incompatible SODIMMs fitted */
+#define MC_CMD_AOE_OUT_INFO_BAD_SODIMM 0x10
+/* enum: Board has ByteBlaster connected */
+#define MC_CMD_AOE_OUT_INFO_HAS_BYTEBLASTER 0x20
+/* enum: FPGA Boot flash has an invalid header. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_BAD_BOOT_HDR 0x40
+/* enum: FPGA Application flash is accessible. */
+#define MC_CMD_AOE_OUT_INFO_FPGA_APP_FLASH_GOOD 0x80
+/* Revision of Modena and Sorrento boards. Sorrento can be R1_2 or R1_3. */
+#define MC_CMD_AOE_OUT_INFO_BOARD_REVISION_OFST 36
+#define MC_CMD_AOE_OUT_INFO_UNKNOWN 0x0 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_0 0x10 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_1 0x11 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_2 0x12 /* enum */
+#define MC_CMD_AOE_OUT_INFO_R1_3 0x13 /* enum */
+/* Result of FC booting - not valid while a ByteBlaster is connected. */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_RESULT_OFST 40
+/* enum: No error */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_NO_ERROR 0x0
+/* enum: Bad address set in CPLD */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_ADDRESS 0x1
+/* enum: Bad header */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_MAGIC 0x2
+/* enum: Bad text section details */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_TEXT 0x3
+/* enum: Bad checksum */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_CHECKSUM 0x4
+/* enum: Bad BSP */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_BAD_BSP 0x5
+/* enum: Flash mode is invalid */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_FAIL_INVALID_FLASH_MODE 0x6
+/* enum: FC application loaded and execution attempted */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_EXECUTE 0x80
+/* enum: FC application Started */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_APP_STARTED 0x81
+/* enum: No bootrom in FPGA */
+#define MC_CMD_AOE_OUT_INFO_FC_BOOT_NO_BOOTROM 0xff
+
+/* MC_CMD_AOE_OUT_CURRENTS msgresponse */
+#define MC_CMD_AOE_OUT_CURRENTS_LEN 68
+/* Set of currents and voltages (mA or mV as appropriate) */
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_CURRENTS_VALUES_NUM 17
+#define MC_CMD_AOE_OUT_CURRENTS_I_2V5 0x0 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V8 0x1 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_GXB 0x2 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_PGM 0x3 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_XCVR 0x4 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_1V5 0x5 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_3V3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_1V5 0x7 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_IN 0x8 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT 0x9 /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_IN 0xa /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR1 0xb /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR1 0xc /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR2 0xd /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR2 0xe /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_I_OUT_DDR3 0xf /* enum */
+#define MC_CMD_AOE_OUT_CURRENTS_V_OUT_DDR3 0x10 /* enum */
+
+/* MC_CMD_AOE_OUT_TEMPERATURES msgresponse */
+#define MC_CMD_AOE_OUT_TEMPERATURES_LEN 40
+/* Set of temperatures */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_OFST 0
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_LEN 4
+#define MC_CMD_AOE_OUT_TEMPERATURES_VALUES_NUM 10
+/* enum: The first set of enum values is for Modena code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO1 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO2 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_VCCIO3 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_PSU 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_FPGA 0x8 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SIENA 0x9 /* enum */
+/* enum: The second set of enum values is for Sorrento code. */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_0 0x0
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_MAIN_1 0x1 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_0 0x2 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_IND_1 0x3 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_0 0x4 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_SODIMM_1 0x5 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_FPGA 0x6 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY0 0x7 /* enum */
+#define MC_CMD_AOE_OUT_TEMPERATURES_SORRENTO_PHY1 0x8 /* enum */
+
+/* MC_CMD_AOE_OUT_CPLD_READ msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_READ_LEN 4
+/* The value read from the CPLD */
+#define MC_CMD_AOE_OUT_CPLD_READ_VALUE_OFST 0
+
+/* MC_CMD_AOE_OUT_FAN_FAILURES msgresponse */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMIN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LENMAX 252
+#define MC_CMD_AOE_OUT_FAN_FAILURES_LEN(num) (0+4*(num))
+/* Failure counts for each fan */
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_OFST 0
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_LEN 4
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MINNUM 1
+#define MC_CMD_AOE_OUT_FAN_FAILURES_COUNT_MAXNUM 63
+
+/* MC_CMD_AOE_OUT_CPLD_REPROGRAM msgresponse */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_LEN 4
+/* Results of status command (only) */
+#define MC_CMD_AOE_OUT_CPLD_REPROGRAM_STATUS_OFST 0
+
+/* MC_CMD_AOE_OUT_POWER_OFF msgresponse */
+#define MC_CMD_AOE_OUT_POWER_OFF_LEN 0
+
+/* MC_CMD_AOE_OUT_POWER_ON msgresponse */
+#define MC_CMD_AOE_OUT_POWER_ON_LEN 0
+
+/* MC_CMD_AOE_OUT_LOAD msgresponse */
+#define MC_CMD_AOE_OUT_LOAD_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_DMA msgresponse */
+#define MC_CMD_AOE_OUT_MAC_STATS_DMA_LEN 0
+
+/* MC_CMD_AOE_OUT_MAC_STATS_NO_DMA msgresponse: See MC_CMD_MAC_STATS_OUT_NO_DMA
+ * for details
+ */
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_AOE_OUT_MAC_STATS_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+
+/* MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO msgresponse */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMIN 5
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LENMAX 252
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_GET_PHY_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_AOE_OUT_JTAG_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMIN 12
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LENMAX 252
+#define MC_CMD_AOE_OUT_JTAG_WRITE_LEN(num) (8+4*(num))
+/* Used to align the in and out data blocks so the MC can re-use the cmd */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATALEN_OFST 0
+/* out bytes */
+#define MC_CMD_AOE_OUT_JTAG_WRITE_PAD_OFST 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_OFST 8
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_LEN 4
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MINNUM 1
+#define MC_CMD_AOE_OUT_JTAG_WRITE_DATA_MAXNUM 61
+
+/* MC_CMD_AOE_OUT_FPGA_ACCESS msgresponse */
+#define MC_CMD_AOE_OUT_FPGA_ACCESS_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR msgresponse */
+#define MC_CMD_AOE_OUT_DDR_LENMIN 17
+#define MC_CMD_AOE_OUT_DDR_LENMAX 252
+#define MC_CMD_AOE_OUT_DDR_LEN(num) (16+1*(num))
+/* Information on the module. */
+#define MC_CMD_AOE_OUT_DDR_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_LBN 0
+#define MC_CMD_AOE_OUT_DDR_PRESENT_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_LBN 1
+#define MC_CMD_AOE_OUT_DDR_POWERED_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_LBN 2
+#define MC_CMD_AOE_OUT_DDR_OPERATIONAL_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_LBN 3
+#define MC_CMD_AOE_OUT_DDR_NOT_REACHABLE_WIDTH 1
+/* Memory size, in MB. */
+#define MC_CMD_AOE_OUT_DDR_CAPACITY_OFST 4
+/* The memory type, as reported from SPD information */
+#define MC_CMD_AOE_OUT_DDR_TYPE_OFST 8
+/* Nominal voltage of the module (as applied) */
+#define MC_CMD_AOE_OUT_DDR_VOLTAGE_OFST 12
+/* SPD data read from the module */
+#define MC_CMD_AOE_OUT_DDR_SPD_OFST 16
+#define MC_CMD_AOE_OUT_DDR_SPD_LEN 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MINNUM 1
+#define MC_CMD_AOE_OUT_DDR_SPD_MAXNUM 236
+
+/* MC_CMD_AOE_OUT_SET_MTU_OFFSET msgresponse */
+#define MC_CMD_AOE_OUT_SET_MTU_OFFSET_LEN 0
+
+/* MC_CMD_AOE_OUT_LINK_STATE msgresponse */
+#define MC_CMD_AOE_OUT_LINK_STATE_LEN 0
+
+/* MC_CMD_AOE_OUT_SIENA_STATS msgresponse */
+#define MC_CMD_AOE_OUT_SIENA_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_ASIC_STATS msgresponse */
+#define MC_CMD_AOE_OUT_ASIC_STATS_LEN 0
+
+/* MC_CMD_AOE_OUT_FC msgresponse */
+#define MC_CMD_AOE_OUT_FC_LEN 0
+
+/* MC_CMD_AOE_OUT_DDR_ECC_STATUS msgresponse */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_LEN 8
+/* Flags describing status info on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_FLAGS_OFST 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_VALID_WIDTH 1
+/* DDR ECC status on the module. */
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_STATUS_OFST 4
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_LBN 0
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_LBN 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_LBN 2
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_WIDTH 1
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_LBN 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_SBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_LBN 16
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_DBE_COUNT_WIDTH 8
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_LBN 24
+#define MC_CMD_AOE_OUT_DDR_ECC_STATUS_CORDROP_COUNT_WIDTH 8
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_READ msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_LEN 4
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_READ_DATA_OFST 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_WRITE_LEN 0
+
+/* MC_CMD_AOE_OUT_MC_SPI_MASTER msgresponse */
+#define MC_CMD_AOE_OUT_MC_SPI_MASTER_LEN 0
+
+/* MC_CMD_AOE_OUT_FC_BOOT msgresponse */
+#define MC_CMD_AOE_OUT_FC_BOOT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define MC_CMD_PTP_IN_OP_OFST 0
+#define MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Adjust the frequency of the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE: extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT
+ */
+#define MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC.
+ */
+#define MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_ENABLE_LEN 16
+#define MC_CMD_PTP_IN_CMD_OFST 0
+#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
+#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
+#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define MC_CMD_PTP_IN_DISABLE_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
+#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define MC_CMD_PTP_IN_STATUS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_ADJUST_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
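The FREQ field above is a 64-bit fixed point quantity whose fractional width is
given by MC_CMD_PTP_IN_ADJUST_BITS (0x28, i.e. 40 bits). As a minimal sketch
(not part of this header; the helper name is hypothetical and the scaling is an
assumption based on reading the value as a fractional frequency offset in 2^-40
units), a driver could derive it from a parts-per-billion adjustment like this:

static inline int64_t
ptp_ppb_to_freq_adj(int64_t ppb)
{
	/*
	 * Scale ppb (units of 1e-9) into 2^-40 units.  Valid while
	 * |ppb| < 2^23 (about 8.4e6), which covers typical clock servos.
	 */
	return (ppb * (1LL << MC_CMD_PTP_IN_ADJUST_BITS)) / 1000000000LL;
}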
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
+#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
+#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define MC_CMD_PTP_IN_DEBUG_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
+#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment major value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+/* Time adjustment minor value */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue id to send events back */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
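Since the QUEUE field now carries both the event queue ID (bits 0-15) and the
REPORT_SYNC_STATUS flag (bit 31), a caller packs both into one dword. A minimal
sketch, with a hypothetical helper name:

static inline uint32_t
ptp_subscribe_queue_word(uint16_t evq_id, int report_sync_status)
{
	/* Queue ID in bits 0-15; bit 31 requests sync status reports. */
	return (uint32_t)evq_id |
	       ((report_sync_status ? 1u : 0u) <<
		MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN);
}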
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Unsubscribe options */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+/* enum: Unsubscribe a single queue */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* NIC - Host System Clock Synchronization status */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+/* enum: Host System clock and NIC clock are not in sync */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
+#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
+#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
+#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse in nanoseconds */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Timestamp major value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+/* Value of nanoseconds timestamp */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Timestamp minor value */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+/* Host time immediately after NIC's hardware clock read */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event ns reading not sufficiently close to zero. */
+#define MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
+#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
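The three formats differ only in the units of the major and minor words. As an
illustrative sketch (not from this patch; the helper is hypothetical and
assumes <stdint.h>), the minor word could be normalised to nanoseconds so:

static inline uint32_t
ptp_minor_to_ns(uint32_t time_format, uint32_t minor)
{
	switch (time_format) {
	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS:
		return minor;				/* already nanoseconds */
	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS:
		return minor * 8;			/* 8 ns per tick */
	case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION:
		return (uint32_t)(((uint64_t)minor * 1000000000ULL) >> 27);
	default:
		return 0;				/* unknown format */
	}
}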
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used by this NIC. Applies to all PTP MCDI
+ * operations that pass times between the host and firmware. If this operation
+ * is not supported (older firmware) a format of seconds and nanoseconds should
+ * be assumed.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+/* enum: Times are in seconds and nanoseconds */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+/* Various PTP capabilities */
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
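Following the description of the corrected synchronization window above, a
driver filtering MC_CMD_PTP_OUT_SYNCHRONIZE samples might compute the window
per timeset as sketched here (illustrative only; the helper is hypothetical)
and compare the result against SYNC_WINDOW_MIN when picking usable samples:

static inline uint32_t
ptp_corrected_sync_window(uint32_t hoststart, uint32_t hostend, uint32_t waitns)
{
	/* Corrected window = (HOSTEND - HOSTSTART) - WAITNS. */
	return (hostend - hoststart) - waitns;
}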
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
+#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
+#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
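Restating the dual use of the fields above: a non-zero ADDR activates OCSD
reporting to that host address every INTERVAL seconds, while a zero (NULL) ADDR
makes the MC interpret INTERVAL as one of the command codes (0 stop, 1 report
state, 2 debug). A hedged sketch of filling the 16-byte request, assuming a
little-endian host (as MCDI payloads are) and <string.h>; the helper is
hypothetical:

static inline void
hp_in_fill(uint8_t *req, uint32_t subcmd, uint64_t addr, uint32_t interval)
{
	memcpy(req + MC_CMD_HP_IN_SUBCMD_OFST, &subcmd, 4);
	memcpy(req + MC_CMD_HP_IN_OCSD_ADDR_OFST, &addr, 8);
	memcpy(req + MC_CMD_HP_IN_OCSD_INTERVAL_OFST, &interval, 4);
}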
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define MC_CMD_STACKINFO_OUT_LENMIN 12
+#define MC_CMD_STACKINFO_OUT_LENMAX 252
+#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 mode. To use clause
+ * 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
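Per the status description above, a transaction is good exactly when the raw
status word equals MC_CMD_MDIO_STATUS_GOOD, i.e. only the DONE bit is set. A
trivial sketch with a hypothetical helper name:

static inline int
mdio_transaction_ok(uint32_t status)
{
	/* DONE bit set and every other status bit clear. */
	return status == MC_CMD_MDIO_STATUS_GOOD;
}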
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
+/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
+#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 mode. To use clause
+ * 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
+#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
+/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
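Reading the LBN values above as bit positions within the PARMS dword at offset
4, one 12-byte write-op entry could be packed as below. This is an illustration
only: the helper is hypothetical and it assumes a little-endian host and
<string.h>.

static inline void
dbi_write_op_pack(uint8_t *entry, uint32_t address, uint16_t vf_num,
		  int vf_active, int cs2, uint32_t value)
{
	uint32_t parms =
	    ((uint32_t)vf_num << MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN) |
	    ((vf_active ? 1u : 0u) << MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN) |
	    ((cs2 ? 1u : 0u) << MC_CMD_DBIWROP_TYPEDEF_CS2_LBN);

	memcpy(entry + MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST, &address, 4);
	memcpy(entry + MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST, &parms, 4);
	memcpy(entry + MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST, &value, 4);
}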
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
+#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
+#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
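Each capability is a single-bit flag at the LBN given above, so testing one in
a capabilities word (for example GET_BOARD_CFG's CAPABILITIES_PORT0 below) is a
one-liner; a hedged sketch with a hypothetical helper name:

static inline int
mc_capabilities_has_aoe(uint32_t caps)
{
	/* The other flags follow the same shift-and-mask pattern. */
	return (caps >> MC_CMD_CAPABILITIES_AOE_LBN) & 1u;
}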
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+/* See MC_CMD_CAPABILITIES */
+#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
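The subtype list is an array of 16-bit little-endian values starting at offset
72, one per NVRAM area type. A driver could pull out a single entry as sketched
here (illustrative only; assumes a little-endian host and <string.h>, and the
helper name is hypothetical):

static inline uint16_t
board_cfg_fw_subtype(const uint8_t *payload, unsigned int nvram_type)
{
	uint16_t subtype;

	memcpy(&subtype,
	       payload + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
	       nvram_type * MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN,
	       sizeof(subtype));
	return subtype;
}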
+
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define MC_CMD_DBI_READX_IN_LENMIN 8
+#define MC_CMD_DBI_READX_IN_LENMAX 248
+#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters (offset
+ * 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define MC_CMD_DBI_READX_OUT_LENMIN 4
+#define MC_CMD_DBI_READX_OUT_LENMAX 252
+#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
+#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Only this option is allowed for non-admin functions */
+#define MC_CMD_FW_DONT_CARE 0xffffffff
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to a circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define MC_CMD_SHMUART_IN_FLAG_OFST 0
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/* MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
+#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define MC_CMD_RXD_MONITOR_IN_LEN 12
+#define MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define MC_CMD_PUTS_IN_LENMIN 13
+#define MC_CMD_PUTS_IN_LENMAX 252
+#define MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define MC_CMD_PUTS_IN_DEST_OFST 0
+#define MC_CMD_PUTS_IN_UART_LBN 0
+#define MC_CMD_PUTS_IN_UART_WIDTH 1
+#define MC_CMD_PUTS_IN_PORT_LBN 1
+#define MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define MC_CMD_PUTS_IN_DHOST_OFST 4
+#define MC_CMD_PUTS_IN_DHOST_LEN 6
+#define MC_CMD_PUTS_IN_STRING_OFST 12
+#define MC_CMD_PUTS_IN_STRING_LEN 1
+#define MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define MC_CMD_PUTS_IN_STRING_MAXNUM 240
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
+#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define MC_CMD_PHY_CAP_10HDX_LBN 1
+#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10FDX_LBN 2
+#define MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100HDX_LBN 3
+#define MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_100FDX_LBN 4
+#define MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define MC_CMD_PHY_CAP_ASYM_LBN 9
+#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define MC_CMD_PHY_CAP_AN_LBN 10
+#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
+#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
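+
+/* Illustrative sketch, not part of the generated MCDI definitions: shows how a
+ * caller might decode the SUPPORTED_CAP bitmask of a MC_CMD_GET_PHY_CFG
+ * response using the offsets and bit positions above. The little-endian dword
+ * accessors below and the caller-supplied response buffer are assumptions made
+ * for the example only.
+ */
+#include <stdint.h>
+
+static inline uint32_t
+sketch_get_dword(const uint8_t *buf, unsigned int ofst)
+{
+	/* MCDI payload fields are little-endian 32-bit words at byte offsets */
+	return (uint32_t)buf[ofst] |
+	       ((uint32_t)buf[ofst + 1] << 8) |
+	       ((uint32_t)buf[ofst + 2] << 16) |
+	       ((uint32_t)buf[ofst + 3] << 24);
+}
+
+static inline void
+sketch_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
+{
+	buf[ofst + 0] = (uint8_t)(val & 0xff);
+	buf[ofst + 1] = (uint8_t)((val >> 8) & 0xff);
+	buf[ofst + 2] = (uint8_t)((val >> 16) & 0xff);
+	buf[ofst + 3] = (uint8_t)((val >> 24) & 0xff);
+}
+
+/* Returns non-zero if the PHY advertises 10G full duplex */
+static inline int
+sketch_phy_supports_10g(const uint8_t *outbuf /* MC_CMD_GET_PHY_CFG_OUT_LEN bytes */)
+{
+	uint32_t cap = sketch_get_dword(outbuf,
+					MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST);
+
+	return (cap >> MC_CMD_PHY_CAP_10000FDX_LBN) & 1;
+}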
+
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel C */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+/* Status of each channel D */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+/* Enum values, see field(s): */
+/* CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
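+
+/* Illustrative sketch, not part of the generated MCDI definitions: checks the
+ * generic RESULT field of a MC_CMD_POLL_BIST response and only consumes the
+ * memory-BIST specific fields when the response is long enough, as recommended
+ * in the command description above. Reuses sketch_get_dword() from the
+ * MC_CMD_GET_PHY_CFG sketch; the response buffer, length and return convention
+ * are assumptions made for the example only.
+ */
+#include <stddef.h>
+
+/* Returns 1 on pass, 0 on any other completed result, -1 if still running or
+ * the response is too short to contain RESULT.
+ */
+static inline int
+sketch_poll_bist_passed(const uint8_t *outbuf, size_t outlen, uint32_t *fail_addr)
+{
+	uint32_t result;
+
+	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
+		return -1;
+
+	result = sketch_get_dword(outbuf, MC_CMD_POLL_BIST_OUT_RESULT_OFST);
+	if (result == MC_CMD_POLL_BIST_RUNNING)
+		return -1;
+
+	/* Only trust the memory-BIST failure address when the full
+	 * MC_CMD_POLL_BIST_OUT_MEM layout was returned and the test failed.
+	 */
+	if (result == MC_CMD_POLL_BIST_FAILED && fail_addr != NULL &&
+	    outlen >= MC_CMD_POLL_BIST_OUT_MEM_LEN)
+		*fail_addr = sketch_get_dword(outbuf,
+					      MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST);
+
+	return result == MC_CMD_POLL_BIST_PASSED;
+}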
+
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation rather than by writing
+ * FLUSH_CMD directly.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
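+
+/* Illustrative sketch, not part of the generated MCDI definitions: builds the
+ * variable-length MC_CMD_FLUSH_RX_QUEUES request for a set of RX queue IDs.
+ * The flush completes asynchronously, so after issuing the command the driver
+ * must still wait for the usual flush done/failed events. Reuses
+ * sketch_put_dword() from the MC_CMD_GET_PHY_CFG sketch; the buffer and queue
+ * ID array are assumptions made for the example only.
+ */
+static inline size_t
+sketch_build_flush_rxq_req(uint8_t *inbuf, size_t inbuf_len,
+			   const uint32_t *qids, unsigned int nqids)
+{
+	size_t req_len = MC_CMD_FLUSH_RX_QUEUES_IN_LEN(nqids);
+	unsigned int i;
+
+	if (nqids < MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM ||
+	    nqids > MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM ||
+	    req_len > inbuf_len)
+		return 0;
+
+	for (i = 0; i < nqids; i++)
+		sketch_put_dword(inbuf,
+				 MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST +
+				 i * MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN,
+				 qids[i]);
+
+	return req_len; /* use as the MCDI request length */
+}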
+
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/* Enum values, see field(s): */
+/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
+#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
+#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+/* Enum values, see field(s): */
+/* MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
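+
+/* Illustrative sketch, not part of the generated MCDI definitions: decodes the
+ * parts of a MC_CMD_GET_LINK response that a driver typically reports, using
+ * the offsets and bit positions above. Reuses sketch_get_dword() from the
+ * MC_CMD_GET_PHY_CFG sketch; the sketch_link_state structure is an assumption
+ * made for the example only.
+ */
+struct sketch_link_state {
+	int up;			/* LINK_UP flag */
+	int full_duplex;	/* FULL_DUPLEX flag */
+	uint32_t speed_mbps;	/* may be non-zero even while the link is down */
+	uint32_t fcntl;		/* negotiated flow control (MC_CMD_FCNTL_*) */
+};
+
+static inline void
+sketch_parse_get_link(const uint8_t *outbuf /* MC_CMD_GET_LINK_OUT_LEN bytes */,
+		      struct sketch_link_state *ls)
+{
+	uint32_t flags = sketch_get_dword(outbuf, MC_CMD_GET_LINK_OUT_FLAGS_OFST);
+
+	ls->up = (flags >> MC_CMD_GET_LINK_OUT_LINK_UP_LBN) & 1;
+	ls->full_duplex = (flags >> MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN) & 1;
+	ls->speed_mbps = sketch_get_dword(outbuf, MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST);
+	ls->fcntl = sketch_get_dword(outbuf, MC_CMD_GET_LINK_OUT_FCNTL_OFST);
+}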
+
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
+#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
+#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported and means "choose any available
+ * speed".
+ */
+#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define MC_CMD_SET_LINK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define MC_CMD_LED_OFF 0x0 /* enum */
+#define MC_CMD_LED_ON 0x1 /* enum */
+#define MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define MC_CMD_FCNTL_GENERATE 0x5
+#define MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
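+
+/* Illustrative sketch, not part of the generated MCDI definitions: fills a
+ * MC_CMD_SET_MAC_EXT_IN request that changes only the MTU, using the CONTROL
+ * field described above so that every other MAC parameter is left untouched on
+ * firmware reporting SET_MAC_ENHANCED. Reuses sketch_put_dword() from the
+ * MC_CMD_GET_PHY_CFG sketch; the zeroed request buffer is an assumption made
+ * for the example only.
+ */
+#include <string.h>
+
+static inline void
+sketch_build_set_mtu_req(uint8_t *inbuf /* MC_CMD_SET_MAC_EXT_IN_LEN bytes */,
+			 uint32_t new_mtu)
+{
+	memset(inbuf, 0, MC_CMD_SET_MAC_EXT_IN_LEN);
+	sketch_put_dword(inbuf, MC_CMD_SET_MAC_EXT_IN_MTU_OFST, new_mtu);
+	/* Only the CFG_MTU control bit is set, so DRAIN, REJECT, FCNTL and FCS
+	 * retain their current values.
+	 */
+	sketch_put_dword(inbuf, MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST,
+			 1u << MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN);
+}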
+
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration). Each value is represented
+ * by a 32-bit number. If DMA_ADDR is 0, then no DMA is performed and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are DMAed to that (page-aligned) location. Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
+
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. The PERIODIC_CLEAR option is not used and now has no
+ * effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define MC_CMD_MAC_STATS_IN_LEN 20
+/* ??? */
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define MC_CMD_GMAC_DMABUF_END 0x5f
+#define MC_CMD_MAC_GENERATION_END 0x60 /* enum */
+#define MC_CMD_MAC_NSTATS 0x61 /* enum */
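+
+/* Illustrative sketch, not part of the generated MCDI definitions: reads one
+ * 64-bit counter out of a MC_CMD_MAC_STATS_OUT_NO_DMA response, indexing the
+ * statistics array by the enum values above (e.g. MC_CMD_MAC_RX_PKTS). Reuses
+ * sketch_get_dword() from the MC_CMD_GET_PHY_CFG sketch; it is assumed the
+ * command was issued with DMA_ADDR == 0 and a zero-initialised buffer.
+ */
+static inline uint64_t
+sketch_mac_stat(const uint8_t *outbuf, unsigned int stat_idx /* < MC_CMD_MAC_NSTATS */)
+{
+	unsigned int ofst = MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST +
+			    stat_idx * MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN;
+	uint64_t lo = sketch_get_dword(outbuf, ofst);
+	uint64_t hi = sketch_get_dword(outbuf, ofst + 4);
+
+	return lo | (hi << 32);
+}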
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define MC_CMD_SRIOV_IN_LEN 12
+#define MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define MC_CMD_SRIOV_OUT_LEN 8
+#define MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define MC_CMD_MEMCPY_IN_LENMIN 32
+#define MC_CMD_MEMCPY_IN_LENMAX 224
+#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define MC_CMD_MEMCPY_OUT_LEN 0
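+
+/* Illustrative sketch, not part of the generated MCDI definitions: builds a
+ * single-record MC_CMD_MEMCPY request whose source data is embedded in the
+ * request itself, i.e. FROM_RID = RID_INLINE and FROM_ADDR_LO = the offset of
+ * the data from the start of the payload, as described above. Reuses
+ * sketch_put_dword() from the MC_CMD_GET_PHY_CFG sketch and memcpy() from
+ * <string.h>; dst_rid, dst_addr and the buffers are assumptions made for the
+ * example only.
+ */
+static inline size_t
+sketch_build_inline_memcpy(uint8_t *inbuf, size_t inbuf_len,
+			   uint32_t dst_rid, uint64_t dst_addr,
+			   const uint8_t *data, uint32_t data_len)
+{
+	/* The embedded data is placed immediately after the single record */
+	uint32_t data_ofst = MC_CMD_MEMCPY_IN_RECORD_LEN;
+	size_t req_len = data_ofst + data_len;
+
+	if (req_len > inbuf_len || req_len > MC_CMD_MEMCPY_IN_LENMAX)
+		return 0;
+
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, 1);
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST, dst_rid);
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST,
+			 (uint32_t)dst_addr);
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST,
+			 (uint32_t)(dst_addr >> 32));
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST,
+			 MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST,
+			 data_ofst);
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST, 0);
+	sketch_put_dword(inbuf, MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST, data_len);
+	memcpy(inbuf + data_ofst, data, data_len);
+
+	return req_len; /* use as the MCDI request length */
+}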
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/* MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/* MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define MC_CMD_NVRAM_INFO_IN_LEN 4
+#define MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define MC_CMD_NVRAM_READ_IN_LEN 12
+#define MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock, in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
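+
+/* Illustrative sketch, not part of the generated MCDI definitions: builds a
+ * MC_CMD_NVRAM_READ_IN_V2 request that reads back the partition currently
+ * being updated (MODE = TARGET_BACKUP), i.e. the verify step of the
+ * read-modify-write-verify flow described above. Reuses sketch_put_dword()
+ * from the MC_CMD_GET_PHY_CFG sketch; type, offset and length are assumptions
+ * made for the example only.
+ */
+static inline void
+sketch_build_nvram_verify_read(uint8_t *inbuf /* MC_CMD_NVRAM_READ_IN_V2_LEN bytes */,
+			       uint32_t type, uint32_t offset,
+			       uint32_t length /* <= MC_CMD_NVRAM_READ_OUT_LENMAX */)
+{
+	sketch_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST, type);
+	sketch_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST, offset);
+	sketch_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST, length);
+	sketch_put_dword(inbuf, MC_CMD_NVRAM_READ_IN_V2_MODE_OFST,
+			 MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
+}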
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to exceed the
+ * MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware; the MCDI
+ * command is then run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing */
+#define MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+
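+/*
+ * Illustrative sketch, not part of the MCDI definitions: decoding the
+ * RESULT_CODE dword from a raw MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT payload.
+ * Assumes <stdint.h>/<stddef.h> types; the helper name is hypothetical and a
+ * real driver would use its own MCDI response accessors instead.
+ */
+static inline int
+nvram_update_finish_v2_verify_ok(const uint8_t *outbuf, size_t outlen)
+{
+	const uint8_t *p =
+		outbuf + MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST;
+	uint32_t rc;
+
+	if (outlen < MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN)
+		return 0;	/* legacy response: no verify result reported */
+
+	/* MCDI fields are little-endian */
+	rc = (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
+	     ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
+	return rc == MC_CMD_NVRAM_VERIFY_RC_SUCCESS;
+}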
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define MC_CMD_REBOOT_IN_LEN 4
+#define MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define MC_CMD_REBOOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define MC_CMD_REBOOT_MODE_IN_LEN 4
+#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for
+ * use as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), and so on.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage (millivolts) */
+#define MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
+#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define MC_CMD_SENSOR_ENTRY_OFST 4
+#define MC_CMD_SENSOR_ENTRY_LEN 8
+#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
+
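+/*
+ * Illustrative sketch, not part of the MCDI definitions: walking one page of
+ * the sensor mask returned by MC_CMD_SENSOR_INFO_EXT_OUT. Sensor numbers are
+ * page * 32 + bit for bits 0..30; bit MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN
+ * is the next-page flag, not a sensor. In each returned entry, MIN1 == MAX1
+ * (or MIN2 == MAX2) indicates an unused range. Helper names are hypothetical.
+ */
+static void
+for_each_sensor_in_page(unsigned int page, uint32_t mask,
+			void (*fn)(unsigned int type, void *ctx), void *ctx)
+{
+	unsigned int bit;
+
+	for (bit = 0; bit < MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN; bit++) {
+		if (mask & (1u << bit))
+			fn(page * 32 + bit, ctx);
+	}
+	/*
+	 * If the NEXT_PAGE bit is set in mask, the caller should issue another
+	 * MC_CMD_SENSOR_INFO_EXT_IN request with PAGE = page + 1.
+	 */
+}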
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then EINVAL is
+ * returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
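+/*
+ * Illustrative sketch, not part of the MCDI definitions: filling the 12-byte
+ * MC_CMD_READ_SENSORS_EXT_IN payload. The 64-bit DMA address is split into
+ * little-endian LO/HI dwords as laid out above; the helper name is an
+ * assumption for illustration.
+ */
+static inline void
+read_sensors_ext_in_fill(uint8_t *buf, uint64_t dma_addr, uint32_t length)
+{
+	unsigned int i;
+
+	for (i = 0; i < 4; i++) {
+		buf[MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST + i] =
+			(uint8_t)(dma_addr >> (8 * i));
+		buf[MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST + i] =
+			(uint8_t)(dma_addr >> (32 + 8 * i));
+		buf[MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST + i] =
+			(uint8_t)(length >> (8 * i));
+	}
+}
+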
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
+
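+/*
+ * Illustrative sketch, not part of the MCDI definitions: unpacking one
+ * little-endian MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword DMAed by
+ * MC_CMD_READ_SENSORS, following the LBN/WIDTH values above. The struct and
+ * helper names are assumptions for illustration.
+ */
+struct sensor_value_entry {
+	uint16_t value;		/* raw 16-bit reading */
+	uint8_t state;		/* MC_CMD_SENSOR_STATE_* */
+	uint8_t type;		/* MC_CMD_SENSOR_*, as in the SENSOR_INFO mask */
+};
+
+static inline struct sensor_value_entry
+sensor_value_entry_unpack(uint32_t dword)
+{
+	struct sensor_value_entry e;
+
+	e.value = (uint16_t)(dword & 0xffff);		/* VALUE: bits 0..15 */
+	e.state = (uint8_t)((dword >> 16) & 0xff);	/* STATE: bits 16..23 */
+	e.type  = (uint8_t)((dword >> 24) & 0xff);	/* TYPE:  bits 24..31 */
+	return e;
+}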
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround; that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such a case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
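+/*
+ * Illustrative sketch, not part of the MCDI definitions: filling the 8-byte
+ * MC_CMD_WORKAROUND_IN payload as two little-endian dwords. The helper name
+ * is an assumption for illustration.
+ */
+static inline void
+workaround_in_fill(uint8_t *buf, uint32_t type, uint32_t enabled)
+{
+	unsigned int i;
+
+	for (i = 0; i < 4; i++) {
+		buf[MC_CMD_WORKAROUND_IN_TYPE_OFST + i] =
+			(uint8_t)(type >> (8 * i));
+		/* any non-zero value enables the workaround named by TYPE */
+		buf[MC_CMD_WORKAROUND_IN_ENABLED_OFST + i] =
+			(uint8_t)(enabled >> (8 * i));
+	}
+}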
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+
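+/*
+ * Illustrative sketch, not part of the MCDI definitions: for SFP+ modules,
+ * mapping the PAGE input of MC_CMD_GET_PHY_MEDIA_INFO to the byte offset of
+ * the 128-byte block read from module I2C address 0xA0, as described above.
+ * The helper name is an assumption for illustration.
+ */
+static inline unsigned int
+sfp_media_info_page_to_offset(unsigned int page)
+{
+	/* PAGE 0 -> offset 0x00, PAGE 1 -> offset 0x80 (128 bytes per page) */
+	return page * 0x80;
+}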
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
+#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+
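+/*
+ * Illustrative sketch, not part of the MCDI definitions: reading the W.X.Y.Z
+ * version fields from a raw MC_CMD_NVRAM_METADATA_OUT payload, honouring the
+ * VERSION_VALID flag. Fields are little-endian; the helper names are
+ * assumptions for illustration.
+ */
+static inline uint16_t
+nvram_md_u16(const uint8_t *buf, unsigned int ofst)
+{
+	return (uint16_t)(buf[ofst] | (buf[ofst + 1] << 8));
+}
+
+static inline int
+nvram_metadata_version(const uint8_t *out, uint16_t ver[4])
+{
+	/* FLAGS is a little-endian dword; the VERSION_VALID bit (LBN 1)
+	 * lives in its lowest byte.
+	 */
+	if (!(out[MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST] &
+	      (1u << MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)))
+		return 0;	/* no version information for this partition */
+
+	ver[0] = nvram_md_u16(out, MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST);
+	ver[1] = nvram_md_u16(out, MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST);
+	ver[2] = nvram_md_u16(out, MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST);
+	ver[3] = nvram_md_u16(out, MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST);
+	return 1;
+}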
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+
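+/*
+ * Illustrative sketch, not part of the MCDI definitions: deriving the Nth
+ * allocated MAC address from the base/count/stride triple returned by
+ * MC_CMD_GET_MAC_ADDRESSES_OUT. The helper name is an assumption, as is
+ * treating the 48-bit MAC as a big-endian integer when applying the stride.
+ */
+static inline int
+mac_addr_from_base(const uint8_t base[6], uint32_t count, uint32_t stride,
+		   uint32_t index, uint8_t out[6])
+{
+	uint64_t mac = 0;
+	int i;
+
+	if (index >= count)
+		return -1;	/* index beyond the allocated range */
+	for (i = 0; i < 6; i++)
+		mac = (mac << 8) | base[i];
+	mac += (uint64_t)index * stride;
+	for (i = 5; i >= 0; i--) {
+		out[i] = (uint8_t)(mac & 0xff);
+		mac >>= 8;
+	}
+	return 0;
+}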
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define MC_CMD_CLP_IN_OP_OFST 0
+/* enum: Return to factory default settings */
+#define MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define MC_CMD_CLP_IN_DEFAULT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define MC_CMD_CLP_IN_SET_MAC_LEN 12
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* MAC address assigned to port */
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define MC_CMD_CLP_IN_GET_MAC_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+/* Boot flag */
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/* MC_CMD_CLP_IN_OP_OFST 0 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MUM_IN msgrequest */
+#define MC_CMD_MUM_IN_LEN 4
+#define MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define MC_CMD_MUM_IN_OP_LBN 0
+#define MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define MC_CMD_MUM_IN_CMD_OFST 0
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device connected to the MUM whose registers are to be read */
+#define MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define MC_CMD_MUM_IN_READ_ADDR_OFST 8
+/* Number of words to read. */
+#define MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* ID of the device connected to the MUM whose registers are to be written */
+#define MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/* MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+/* Words to write */
+#define MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MUM I2C cmd code */
+#define MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+/* Number of bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+/* Number of bytes to read */
+#define MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+/* Bytes to write */
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/* Enable/disable debug output to UART */
+#define MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
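+/*
+ * Illustrative sketch, not part of the MCDI definitions: composing the header
+ * word of an MC_CMD_MUM_IN_GPIO_OP request from the LBN/WIDTH fields above.
+ * The helper name is an assumption for illustration.
+ */
+static inline uint32_t
+mum_gpio_op_hdr(uint8_t bitwise_op, uint8_t gpio_number, uint8_t value)
+{
+	uint32_t hdr = 0;
+
+	hdr |= (uint32_t)MC_CMD_MUM_IN_GPIO_OP << MC_CMD_MUM_IN_GPIO_OPCODE_LBN;
+	hdr |= (uint32_t)bitwise_op << MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN;
+	hdr |= (uint32_t)gpio_number << MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN;
+	/* For OUT_WRITE/OUT_CONFIG/OUT_ENABLE the per-op payload bit sits at
+	 * bit 24 (e.g. MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN); pass
+	 * value as 0 for OUT_READ.
+	 */
+	hdr |= (uint32_t)value << MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN;
+	return hdr;
+}
+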
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Bit-mask of clocks to be programmed */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+/* Enable/Disable FPGA config from flash */
+#define MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+/* returned data */
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_READ_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO IN register. */
+#define MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+/* The second 32-bit word read from the GPIO OUT register. */
+#define MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+/* in bytes */
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V (values 5-15 are reserved for future usage) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but contact cannot be established
+ * over I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
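+
+/* Illustrative sketch (assumption, not part of the generated definitions):
+ * each SODIMM_INFO_RECORD is a 64-bit value split over the _LO/_HI dwords
+ * above, with fields located by their _LBN/_WIDTH pairs, e.g.:
+ *
+ *   #include <stdint.h>
+ *
+ *   static inline uint64_t
+ *   ddr_record_field(uint32_t lo, uint32_t hi,
+ *                    unsigned int lbn, unsigned int width)
+ *   {
+ *       uint64_t record = ((uint64_t)hi << 32) | lo;
+ *       return (record >> lbn) & ((UINT64_C(1) << width) - 1);
+ *   }
+ *
+ *   // state = ddr_record_field(lo, hi,
+ *   //                          MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN,
+ *   //                          MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH);
+ */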
+
+/* MC_CMD_RESOURCE_SPECIFIER enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define LICENSED_APP_ID_LEN 4
+#define LICENSED_APP_ID_ID_OFST 0
+/* enum: OpenOnload */
+#define LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+#define LICENSED_APP_ID_ID_LBN 0
+#define LICENSED_APP_ID_ID_WIDTH 32
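+
+/* Illustrative sketch (assumption): the application IDs above are distinct
+ * single-bit values, so a reported bitmask of licensed applications (compare
+ * LICENSED_V3_APPS below) can be tested against them directly:
+ *
+ *   if (licensed_apps & LICENSED_APP_ID_PTP)
+ *       ;  // PTP timestamping is licensed
+ */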
+
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event: an ordinary TX completion, or the low or high part of a
+ * TX timestamp
+ */
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is the low part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
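+
+/* Illustrative sketch (assumption): one event carries 32 bits of timestamp
+ * data (TSTAMP_DATA_LO/HI) plus a TYPE byte saying whether that data is the
+ * low or high part of the full TX timestamp, or an ordinary completion:
+ *
+ *   #include <stdint.h>
+ *   #include <string.h>
+ *
+ *   static uint32_t tx_tstamp_data(const uint8_t *ev)   // ev: 6-byte event
+ *   {
+ *       uint16_t lo, hi;
+ *       memcpy(&lo, ev + TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST, sizeof(lo));
+ *       memcpy(&hi, ev + TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST, sizeof(hi));
+ *       return ((uint32_t)hi << 16) | lo;   // assumes little-endian host
+ *   }
+ *
+ *   // ev[TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST] holds TX_EV_COMPLETION,
+ *   // TX_EV_TSTAMP_LO or TX_EV_TSTAMP_HI.
+ */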
+
+/* RSS_MODE structuredef */
+#define RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 - 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define RSS_MODE_HASH_SELECTOR_OFST 0
+#define RSS_MODE_HASH_SELECTOR_LEN 1
+#define RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define RSS_MODE_HASH_DST_ADDR_LBN 1
+#define RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define RSS_MODE_HASH_SRC_PORT_LBN 2
+#define RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define RSS_MODE_HASH_DST_PORT_LBN 3
+#define RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define RSS_MODE_HASH_SELECTOR_LBN 0
+#define RSS_MODE_HASH_SELECTOR_WIDTH 8
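+
+/* Illustrative sketch (assumption): the 4-bit hash selector is the OR of the
+ * per-field bits defined above, e.g. a conventional 4-tuple hash:
+ *
+ *   uint8_t rss_mode = (1u << RSS_MODE_HASH_SRC_ADDR_LBN) |
+ *                      (1u << RSS_MODE_HASH_DST_ADDR_LBN) |
+ *                      (1u << RSS_MODE_HASH_SRC_PORT_LBN) |
+ *                      (1u << RSS_MODE_HASH_DST_PORT_LBN);   // == 0xf
+ *
+ * A value of 0 disables RSS spreading for the packet type, as noted above.
+ */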
+
+/* CTPIO_STATS_MAP structuredef */
+#define CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define CTPIO_STATS_MAP_VI_OFST 0
+#define CTPIO_STATS_MAP_VI_LEN 2
+#define CTPIO_STATS_MAP_VI_LBN 0
+#define CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define CTPIO_STATS_MAP_BUCKET_OFST 2
+#define CTPIO_STATS_MAP_BUCKET_LEN 2
+#define CTPIO_STATS_MAP_BUCKET_LBN 16
+#define CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
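+
+/* Illustrative sketch (assumption, not the driver's actual MCDI plumbing):
+ * the request is laid out in a byte buffer using the _OFST definitions above,
+ * and the payload length for an EVQ backed by n_pages 4k pages is
+ * MC_CMD_INIT_EVQ_IN_LEN(n_pages). With a hypothetical put_dword() helper
+ * (plain memcpy of a 32-bit value, little-endian host assumed):
+ *
+ *   size_t len = MC_CMD_INIT_EVQ_IN_LEN(n_pages);
+ *
+ *   put_dword(buf, MC_CMD_INIT_EVQ_IN_SIZE_OFST, evq_entries);
+ *   put_dword(buf, MC_CMD_INIT_EVQ_IN_INSTANCE_OFST, evq_index);
+ *   put_dword(buf, MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST,
+ *             MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ *   for (unsigned int i = 0; i < n_pages; i++) {
+ *       put_dword(buf, MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST + 8 * i,
+ *                 (uint32_t)dma_addr[i]);
+ *       put_dword(buf, MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST + 8 * i,
+ *                 (uint32_t)(dma_addr[i] >> 32));
+ *   }
+ */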
+
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count RX and TX events */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+/* Actual configuration applied on the card */
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet (FCoE). */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+/* Flags related to Qbb flow control mode. */
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define MC_CMD_PROXY_CMD_OUT_LEN 0
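+
+/* Illustrative sketch (assumption): TARGET packs the PF number in its low 16
+ * bits and the VF number in its high 16 bits; VF_NULL indicates the target is
+ * a PF:
+ *
+ *   uint32_t target = ((uint32_t)pf & 0xffff) |
+ *                     ((uint32_t)MC_CMD_PROXY_CMD_IN_VF_NULL
+ *                      << MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN);
+ */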
+
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+/* enum: An invalid handle. */
+#define MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
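+
+/* Illustrative sketch (assumption): each of the three host buffers must hold
+ * at least NUM_BLOCKS blocks of its respective block size, so sizing is
+ * simply:
+ *
+ *   size_t status_bytes  = num_blocks * status_block_size;   // power of 2
+ *   size_t request_bytes = num_blocks * request_block_size;  // power of 2
+ *   size_t reply_bytes   = num_blocks * reply_block_size;    // 0 if unused
+ */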
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD, the reply
+ * is stored in the REPLY_BUFF.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+/* PORT_CONFIG_ENTRY structuredef */
+#define PORT_CONFIG_ENTRY_LEN 16
+/* External port number (label) */
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_OFST 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_LBN 0
+#define PORT_CONFIG_ENTRY_EXT_NUMBER_WIDTH 8
+/* Port core location */
+#define PORT_CONFIG_ENTRY_CORE_OFST 1
+#define PORT_CONFIG_ENTRY_CORE_LEN 1
+#define PORT_CONFIG_ENTRY_STANDALONE 0x0 /* enum */
+#define PORT_CONFIG_ENTRY_MASTER 0x1 /* enum */
+#define PORT_CONFIG_ENTRY_SLAVE 0x2 /* enum */
+#define PORT_CONFIG_ENTRY_CORE_LBN 8
+#define PORT_CONFIG_ENTRY_CORE_WIDTH 8
+/* Internal number (HW resource) relative to the core */
+#define PORT_CONFIG_ENTRY_INT_NUMBER_OFST 2
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LEN 1
+#define PORT_CONFIG_ENTRY_INT_NUMBER_LBN 16
+#define PORT_CONFIG_ENTRY_INT_NUMBER_WIDTH 8
+/* Reserved */
+#define PORT_CONFIG_ENTRY_RSVD_OFST 3
+#define PORT_CONFIG_ENTRY_RSVD_LEN 1
+#define PORT_CONFIG_ENTRY_RSVD_LBN 24
+#define PORT_CONFIG_ENTRY_RSVD_WIDTH 8
+/* Bitmask of KR lanes used by the port */
+#define PORT_CONFIG_ENTRY_LANES_OFST 4
+#define PORT_CONFIG_ENTRY_LANES_LBN 32
+#define PORT_CONFIG_ENTRY_LANES_WIDTH 32
+/* Port capabilities (MC_CMD_PHY_CAP_*) */
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_OFST 8
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_LBN 64
+#define PORT_CONFIG_ENTRY_SUPPORTED_CAPS_WIDTH 32
+/* Reserved (align to 16 bytes) */
+#define PORT_CONFIG_ENTRY_RSVD2_OFST 12
+#define PORT_CONFIG_ENTRY_RSVD2_LBN 96
+#define PORT_CONFIG_ENTRY_RSVD2_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_OUT/HANDLE */
+
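As a usage sketch of the FILTER_OP_IN layout above (not taken from the patch): the snippet below builds an insert request that matches destination MAC plus Ethernet type and delivers to a single host RX queue. put_le32() is a hypothetical little-endian store helper (MCDI payloads are little-endian); a real driver would use its own MCDI accessors, and filter_op_insert_example() is a made-up name.

#include <stdint.h>
#include <string.h>

/* Hypothetical little-endian dword store; MCDI payloads are little-endian. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
	buf[ofst + 0] = (uint8_t)(v & 0xff);
	buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
	buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
	buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

/* Sketch: insert a filter on DST_MAC + ETHER_TYPE, steered to one RX queue. */
static void filter_op_insert_example(uint8_t *req, uint32_t port_id,
				     uint32_t rxq, const uint8_t *dst_mac,
				     const uint8_t *ether_type_be)
{
	memset(req, 0, MC_CMD_FILTER_OP_IN_LEN);
	put_le32(req, MC_CMD_FILTER_OP_IN_OP_OFST,
		 MC_CMD_FILTER_OP_IN_OP_INSERT);
	put_le32(req, MC_CMD_FILTER_OP_IN_PORT_ID_OFST, port_id);
	/* MATCH_FIELDS is a bitmask; the _LBN values above are the bit numbers */
	put_le32(req, MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST,
		 (1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN) |
		 (1u << MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN));
	put_le32(req, MC_CMD_FILTER_OP_IN_RX_DEST_OFST,
		 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
	put_le32(req, MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST, rxq);
	put_le32(req, MC_CMD_FILTER_OP_IN_RX_MODE_OFST,
		 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
	put_le32(req, MC_CMD_FILTER_OP_IN_TX_DEST_OFST,
		 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
	/* match values are carried as raw bytes in network order */
	memcpy(req + MC_CMD_FILTER_OP_IN_DST_MAC_OFST, dst_mac,
	       MC_CMD_FILTER_OP_IN_DST_MAC_LEN);
	memcpy(req + MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST, ether_type_be,
	       MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN);
}

On success the response carries the 64-bit filter handle in HANDLE_LO/HANDLE_HI; since 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle, that value is a convenient "no filter installed" marker for driver state.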
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT msgresponse:
+ * GET_PARSER_DISP_INFO response format for OP_GET_SECURITY_RULE_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_LEN 36
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* a version number representing the set of rule lookups that are implemented
+ * by the currently running firmware
+ */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_OFST 4
+/* enum: implements lookup sequences described in SF-114946-SW draft C */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_RULES_VERSION_SF_114946_SW_C 0x0
+/* the number of nodes in the subnet map */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_NODES_OFST 8
+/* the number of entries in one subnet map node */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_MAP_NUM_ENTRIES_PER_NODE_OFST 12
+/* minimum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MIN_OFST 16
+/* maximum valid value for a subnet ID in a subnet map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_SUBNET_ID_MAX_OFST 20
+/* the number of entries in the local and remote port range maps */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_TREE_NUM_ENTRIES_OFST 24
+/* minimum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MIN_OFST 28
+/* maximum valid value for a portrange ID in a port range map leaf */
+#define MC_CMD_GET_PARSER_DISP_SECURITY_RULE_INFO_OUT_PORTRANGE_ID_MAX_OFST 32
+
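A sketch of consuming the variable-length GET_SUPPORTED_RX_MATCHES response above, assuming a hypothetical get_le32() little-endian load helper (the counterpart of put_le32() in the FILTER_OP sketch) and resp_len as the response length reported by the MCDI transport:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: does the firmware support exactly this MATCH_FIELDS combination? */
static bool rx_match_supported(const uint8_t *resp, size_t resp_len,
			       uint32_t match_fields)
{
	uint32_t num = get_le32(resp,
	    MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST);
	uint32_t i;

	/* LEN(num) = 8 + 4 * num; never trust num beyond the reply size */
	if (resp_len < MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num))
		return false;

	for (i = 0; i < num; i++) {
		unsigned int ofst =
		    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST +
		    i * MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN;

		/* entries are FILTER_OP MATCH_FIELDS bitmasks, listed in
		 * decreasing priority order */
		if (get_le32(resp, ofst) == match_fields)
			return true;
	}
	return false;
}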
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
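The DMEM read-modify-write rule is new = (old & mask) ^ value, so setting a single bit means clearing it with the AND mask and then flipping it on with the XOR value, leaving all other bits untouched. A sketch, reusing the hypothetical put_le32() helper from the FILTER_OP example:

/* Sketch: set one bit of an RX DICPU DMEM word via PARSER_DISP_RW RMW. */
static void dmem_rmw_set_bit(uint8_t *req, uint32_t addr, unsigned int bit)
{
	memset(req, 0, MC_CMD_PARSER_DISP_RW_IN_LEN);
	put_le32(req, MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST,
		 MC_CMD_PARSER_DISP_RW_IN_RX_DICPU);
	put_le32(req, MC_CMD_PARSER_DISP_RW_IN_OP_OFST,
		 MC_CMD_PARSER_DISP_RW_IN_RMW);
	put_le32(req, MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST, addr);
	/* mask clears the target bit, XOR value then sets it;
	 * every other bit keeps its old value */
	put_le32(req, MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST,
		 ~(1u << bit));
	put_le32(req, MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST,
		 1u << bit);
}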
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
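Under the usual PCIe SR-IOV interpretation of these fields, the offset and stride describe how VF routing IDs are laid out relative to the PF: the first VF sits VF_OFFSET after the PF, and each further VF is VF_STRIDE after the previous one. A small arithmetic sketch (vf_routing_id() is a made-up helper name, and vf_index counts from 0 for the first VF):

#include <stdint.h>

/* Sketch: derive a VF routing ID from GET/SET_SRIOV_CFG offset and stride. */
static uint32_t vf_routing_id(uint32_t pf_rid, uint32_t vf_offset,
			      uint32_t vf_stride, unsigned int vf_index)
{
	return pf_rid + vf_offset + vf_index * vf_stride;
}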
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated to
+ * this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
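One reading of the SET_VI_TLP_PROCESSING layout, based on the numbers above rather than any separate documentation: the flag LBNs appear to be counted from the start of the request (RELAXED_ORDERING is LBN 48, which is larger than 31), so it lands at bit 48 - 32 = 16 of the DATA dword at offset 4. A sketch under that assumption, reusing the hypothetical put_le32() helper:

/* Sketch: request relaxed ordering for one VI, leaving TPH tags at 0. */
static void set_vi_relaxed_ordering(uint8_t *req, uint32_t vi)
{
	uint32_t data = 0;

	memset(req, 0, MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN);
	put_le32(req, MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST, vi);
	/* convert the message-relative LBN into a bit within the DATA dword */
	data |= 1u << (MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN -
		       8 * MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST);
	put_le32(req, MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST, data);
}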
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
+
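The download therefore proceeds RESET, then IMEMS (targets 0-11), then VECTORS (targets 12-15), then READY, with each IMEM/vector image split into chunks and terminated by a CHUNK_ID_LAST chunk whose payload is a plain 32-bit sum of the data already sent for that target. A checksum sketch (satellite_checksum() is a made-up name; the surrounding send loop is left to the driver's MCDI transport):

#include <stddef.h>
#include <stdint.h>

/* Sketch: the simple 32-bit sum carried in the final CHUNK_ID_LAST chunk. */
static uint32_t satellite_checksum(const uint32_t *words, size_t nwords)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		sum += words[i];	/* plain wrapping 32-bit sum */
	return sum;
}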
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+
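+/*
+ * Illustrative sketch, not part of the MCDI protocol definitions: the
+ * _LBN/_WIDTH pairs above describe bit fields within the little-endian
+ * FLAGS1 dword of the GET_CAPABILITIES response. The helper below shows one
+ * way a caller might test a single capability flag; the function name, the
+ * raw-buffer handling and the little-endian host assumption are made up for
+ * this example only.
+ */
+#if 0 /* example only */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+static bool
+example_cap_rx_vlan_stripping(const uint8_t *resp, size_t resp_len)
+{
+	uint32_t flags1;
+
+	if (resp_len < MC_CMD_GET_CAPABILITIES_OUT_LEN)
+		return false;
+	/* FLAGS1 is the first dword of the response (offset 0). */
+	memcpy(&flags1, resp + MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST,
+	       sizeof(flags1));
+	/* Extract WIDTH bits starting at bit LBN. */
+	return (flags1 >> MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN) &
+	       ((1u << MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH) - 1);
+}
+#endif
+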
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is in use.
+ * A future driver should look for a new field supporting the new scheme;
+ * the current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
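+/*
+ * Illustrative sketch, not part of the MCDI protocol definitions: reading
+ * the external port assigned to a given PF from the PFS_TO_PORTS_ASSIGNMENT
+ * array of GET_CAPABILITIES_V2, treating the special values described above
+ * as "no usable port". The function name and the -1 convention are
+ * assumptions for this example only; the caller is assumed to have checked
+ * the response length against MC_CMD_GET_CAPABILITIES_V2_OUT_LEN.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+
+static int
+example_port_for_pf(const uint8_t *resp, unsigned int pf)
+{
+	uint8_t port;
+
+	if (pf >= MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM)
+		return -1;
+	port = resp[MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];
+	switch (port) {
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED:
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT:
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED:
+	case MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT:
+		return -1;	/* no usable port assignment */
+	default:
+		return port;
+	}
+}
+#endif
+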
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is in use.
+ * A future driver should look for a new field supporting the new scheme;
+ * the current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each
+ * VI is configurable. This is a global setting that the driver must query to
+ * discover the VI-to-address mapping. Cut-through PIO (CTPIO) is not
+ * available with 8k VI windows.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+
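+/*
+ * Illustrative sketch, not part of the MCDI protocol definitions: mapping
+ * the VI_WINDOW_MODE value reported in GET_CAPABILITIES_V3 to the per-VI
+ * window stride, following the 8k/16k/64k enum comments above. The function
+ * name is an assumption for this example only.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+
+static uint32_t
+example_vi_window_stride(uint8_t vi_window_mode)
+{
+	switch (vi_window_mode) {
+	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+		return 8 * 1024;	/* PIO at offset 4k, CTPIO not mapped */
+	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+		return 16 * 1024;	/* PIO at offset 4k, CTPIO at 12k */
+	case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+		return 64 * 1024;	/* PIO at offset 4k, CTPIO at 12k */
+	default:
+		return 0;		/* unknown mode */
+	}
+}
+#endif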
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
+
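+/*
+ * Illustrative sketch, not part of the MCDI protocol definitions: packing
+ * the MC_CMD_V2_EXTN_IN dword from an extended command number and the
+ * actual length of the encapsulated command, using the LBN/WIDTH layout
+ * above. The function name is an assumption for this example only.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+
+static uint32_t
+example_pack_v2_extn(uint16_t extended_cmd, uint16_t actual_len)
+{
+	uint32_t dword = 0;
+
+	/* 15-bit extended command number at bit 0. */
+	dword |= ((uint32_t)extended_cmd &
+		  ((1u << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH) - 1)) <<
+		 MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN;
+	/* 10-bit actual length of the encapsulated command at bit 16. */
+	dword |= ((uint32_t)actual_len &
+		  ((1u << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH) - 1)) <<
+		 MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN;
+	return dword;
+}
+#endif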
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
+
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
+
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
+
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+/* the desired maximum fill level */
+#define MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
+
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into. */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+/* bitmask of the priority queues this txq is inserted into. */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if the existing configuration means we can't support
+ * attached v-ports with this number of tags.
+ */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
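+/*
+ * Illustrative sketch, not part of the MCDI protocol definitions: filling
+ * in an MC_CMD_VSWITCH_ALLOC request for a VEB v-switch with no VLAN tags.
+ * The little-endian store helper and both function names are assumptions
+ * for this example only.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+#include <string.h>
+
+static void
+example_put_le32(uint8_t *buf, unsigned int ofst, uint32_t val)
+{
+	buf[ofst + 0] = (uint8_t)(val >> 0);
+	buf[ofst + 1] = (uint8_t)(val >> 8);
+	buf[ofst + 2] = (uint8_t)(val >> 16);
+	buf[ofst + 3] = (uint8_t)(val >> 24);
+}
+
+static void
+example_build_vswitch_alloc(uint8_t *req, uint32_t upstream_port_id)
+{
+	memset(req, 0, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+	example_put_le32(req, MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
+			 upstream_port_id);
+	example_put_le32(req, MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST,
+			 MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+	/* FLAGS and NUM_VLAN_TAGS are left at zero. */
+}
+#endif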
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some configuration of a v-switch. For now this command is an empty
+ * placeholder. It may be used to check whether a v-switch is connected to a
+ * given EVB port (if not, the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which don't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+
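+/*
+ * Illustrative sketch, not part of the MCDI protocol definitions: the two
+ * 16-bit VLAN tags of MC_CMD_VPORT_ALLOC_IN are packed into the single
+ * VLAN_TAGS dword at offset 16, as described by the LBN/WIDTH pairs above.
+ * The function name is an assumption for this example only.
+ */
+#if 0 /* example only */
+#include <stdint.h>
+
+static uint32_t
+example_pack_vport_vlan_tags(uint16_t tag0, uint16_t tag1)
+{
+	return ((uint32_t)tag0 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN) |
+	       ((uint32_t)tag1 << MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN);
+}
+#endif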
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+/* The number of VLAN tags to transparently insert/remove. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The new MAC address to assign to this v-adaptor */
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some configuration of a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
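Another illustrative sketch: packing the MC_CMD_EVB_PORT_ASSIGN request, where the FUNCTION dword carries the PF in bits 0-15 and the VF in bits 16-31 per the LBN/WIDTH pairs above. The mcdi_rpc()/mcdi_put_dword() helpers are the hypothetical ones from the MC_CMD_VADAPTOR_ALLOC sketch.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);

static int evb_port_assign(uint32_t port_id, unsigned int pf, unsigned int vf)
{
    uint8_t inbuf[MC_CMD_EVB_PORT_ASSIGN_IN_LEN];
    uint32_t function;

    /* FUNCTION packs PF in bits 0-15 and VF in bits 16-31. */
    function = ((uint32_t)(pf & 0xffff) << MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN) |
               ((uint32_t)(vf & 0xffff) << MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN);

    memset(inbuf, 0, sizeof(inbuf));
    mcdi_put_dword(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST, port_id);
    mcdi_put_dword(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST, function);

    return mcdi_rpc(MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
                    NULL, 0, NULL);
}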
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+/* enum: guaranteed invalid RSS context handle value */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
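An illustrative sketch of allocating an exclusive RSS context and reading back the handle, checking it against the guaranteed-invalid value above. mcdi_rpc()/mcdi_put_dword() are the hypothetical helpers from the earlier sketch; mcdi_get_dword() is their little-endian load counterpart, defined here.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);

/* Little-endian 32-bit load from an MCDI payload. */
uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst)
{
    return (uint32_t)buf[ofst] |
           ((uint32_t)buf[ofst + 1] << 8) |
           ((uint32_t)buf[ofst + 2] << 16) |
           ((uint32_t)buf[ofst + 3] << 24);
}

/* Allocate an exclusive RSS context spanning num_queues RX queues (1-64). */
static int rss_context_alloc(uint32_t upstream_port_id,
                             unsigned int num_queues,
                             uint32_t *rss_context_id)
{
    uint8_t inbuf[MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN];
    uint8_t outbuf[MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN];
    int rc;

    memset(inbuf, 0, sizeof(inbuf));
    mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST,
                   upstream_port_id);
    mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST,
                   MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
    mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST,
                   num_queues);

    rc = mcdi_rpc(MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
                  outbuf, sizeof(outbuf), NULL);
    if (rc != 0)
        return rc;

    *rss_context_id =
        mcdi_get_dword(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST);
    if (*rss_context_id == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
        return -1; /* firmware returned the guaranteed-invalid handle */
    return 0;
}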
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
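A sketch of programming the 128-entry indirection table, spreading entries round-robin over the queues of the context (each entry must be in 0..NUM_QUEUES-1, as noted at MC_CMD_RSS_CONTEXT_ALLOC). Helpers are the hypothetical ones from the earlier sketches.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);

static int rss_context_set_table(uint32_t rss_context_id,
                                 unsigned int num_queues)
{
    uint8_t inbuf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN];
    unsigned int i;

    if (num_queues == 0 || num_queues > 64)
        return -1;

    memset(inbuf, 0, sizeof(inbuf));
    mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST,
                   rss_context_id);

    /* One byte per entry, 128 entries, values wrap over the queue range. */
    for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
        inbuf[MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST + i] =
            (uint8_t)(i % num_queues);

    return mcdi_rpc(MC_CMD_RSS_CONTEXT_SET_TABLE, inbuf, sizeof(inbuf),
                    NULL, 0, NULL);
}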
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
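A sketch of setting the hash control flags using only the legacy _EN bits, i.e. leaving every _MODE field at zero so the FLAGS value stays within 0xff and is also accepted by firmware that does not report ADDITIONAL_RSS_MODES (as the comment above warns). Helpers are the hypothetical ones from the earlier sketches.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);

/* Enable 4-tuple hashing for TCP and 2-tuple hashing for other IP traffic. */
static int rss_context_set_legacy_flags(uint32_t rss_context_id)
{
    uint8_t inbuf[MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN];
    uint32_t flags = 0;

    flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN;
    flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN;
    flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN;
    flags |= 1u << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN;

    memset(inbuf, 0, sizeof(inbuf));
    mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST,
                   rss_context_id);
    mcdi_put_dword(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST, flags);

    return mcdi_rpc(MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf, sizeof(inbuf),
                    NULL, 0, NULL);
}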
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
+
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+/* enum: guaranteed invalid .1p mapping handle value */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
+
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
+
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
+
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
+
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
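A sketch of parsing the variable-length MC_CMD_VPORT_GET_MAC_ADDRESSES response, where the payload length is MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(count) and the 6-byte addresses are packed consecutively from MACADDR_OFST. Helpers are the hypothetical ones from the earlier sketches.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);
extern uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst);

static int vport_dump_mac_addresses(uint32_t vport_id)
{
    uint8_t inbuf[MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN];
    uint8_t outbuf[MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX];
    size_t outlen = 0;
    uint32_t count;
    uint32_t i;
    int rc;

    memset(inbuf, 0, sizeof(inbuf));
    mcdi_put_dword(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST,
                   vport_id);
    rc = mcdi_rpc(MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf, sizeof(inbuf),
                  outbuf, sizeof(outbuf), &outlen);
    if (rc != 0)
        return rc;

    count = mcdi_get_dword(outbuf,
                           MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST);
    /* Cross-check the advertised count against the actual response length. */
    if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(count))
        return -1;

    for (i = 0; i < count; i++) {
        const uint8_t *mac =
            &outbuf[MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST +
                    i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN];
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    }
    return 0;
}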
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some configuration of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for queue page mapping, although the latter restriction
+ * may be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
+
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* enum: Leave the system clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+/* enum: Leave the MC clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 28
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* enum: The system clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+/* enum: The dpcpu clock domain doesn't exist */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
+
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
+
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
+
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
+
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information. PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
+
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+/* Offset at which to write the data */
+#define MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define MC_CMD_UART_SEND_DATA_IN_LEN 0
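A sketch of building the (fire-and-forget) MC_CMD_UART_SEND_DATA request, where the CHECKSUM dword covers OFFSET, LENGTH, RESERVED and DATA. The crc32() routine is a stand-in for whatever CRC32 implementation the environment provides; the exact CRC variant expected by the firmware is not specified by these defines. Other helpers are the hypothetical ones from the earlier sketches.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);
extern uint32_t crc32(const uint8_t *buf, size_t len);

static int uart_send_data(uint32_t offset, const uint8_t *data, size_t len)
{
    uint8_t inbuf[MC_CMD_UART_SEND_DATA_OUT_LENMAX];
    size_t inlen = MC_CMD_UART_SEND_DATA_OUT_LEN(len);

    if (len > MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM)
        return -1;

    memset(inbuf, 0, inlen);
    mcdi_put_dword(inbuf, MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST, offset);
    mcdi_put_dword(inbuf, MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST, (uint32_t)len);
    /* RESERVED (offset 12) stays zero. */
    memcpy(&inbuf[MC_CMD_UART_SEND_DATA_OUT_DATA_OFST], data, len);

    /* CHECKSUM covers everything after the checksum dword itself. */
    mcdi_put_dword(inbuf, MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST,
                   crc32(&inbuf[MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST],
                         inlen - MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST));

    return mcdi_rpc(MC_CMD_UART_SEND_DATA, inbuf, inlen, NULL, 0, NULL);
}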
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+/* Offset from which to read the data */
+#define MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+/* Length of data */
+#define MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+/* Reserved for future use */
+#define MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
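A sketch of reading a block of OTP data in chunks, since a single MC_CMD_READ_FUSES response carries at most DATA_MAXNUM (248) bytes. Helpers are the hypothetical ones from the earlier sketches.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);
extern uint32_t mcdi_get_dword(const uint8_t *buf, unsigned int ofst);

static int read_fuses(uint32_t offset, uint8_t *dst, size_t len)
{
    uint8_t inbuf[MC_CMD_READ_FUSES_IN_LEN];
    uint8_t outbuf[MC_CMD_READ_FUSES_OUT_LENMAX];
    size_t outlen;
    int rc;

    while (len > 0) {
        uint32_t chunk = (len > MC_CMD_READ_FUSES_OUT_DATA_MAXNUM) ?
                         MC_CMD_READ_FUSES_OUT_DATA_MAXNUM : (uint32_t)len;
        uint32_t got;

        memset(inbuf, 0, sizeof(inbuf));
        mcdi_put_dword(inbuf, MC_CMD_READ_FUSES_IN_OFFSET_OFST, offset);
        mcdi_put_dword(inbuf, MC_CMD_READ_FUSES_IN_LENGTH_OFST, chunk);

        outlen = 0;
        rc = mcdi_rpc(MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
                      outbuf, sizeof(outbuf), &outlen);
        if (rc != 0)
            return rc;

        /* The response reports how many bytes were actually returned. */
        got = mcdi_get_dword(outbuf, MC_CMD_READ_FUSES_OUT_LENGTH_OFST);
        if (got == 0 || got > chunk || outlen < MC_CMD_READ_FUSES_OUT_LEN(got))
            return -1;

        memcpy(dst, &outbuf[MC_CMD_READ_FUSES_OUT_DATA_OFST], got);
        dst += got;
        offset += got;
        len -= got;
    }
    return 0;
}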
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
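A sketch of the start/poll sequence described above for the eye diagram plot: issue START_EYE_PLOT once, then call POLL_EYE_PLOT repeatedly until the firmware returns an empty response. Passing the lane as a single argument dword to START_EYE_PLOT is an assumption made for illustration; the per-operation argument layout is not spelled out by these defines. Helpers are the hypothetical ones from the earlier sketches.

#include <stdint.h>
#include <string.h>

extern int mcdi_rpc(unsigned int cmd, const uint8_t *in, size_t inlen,
                    uint8_t *out, size_t outlen, size_t *outlen_used);
extern void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val);

static int kr_tune_eye_plot(uint32_t lane)
{
    uint8_t inbuf[MC_CMD_KR_TUNE_IN_LEN(1)];
    uint8_t outbuf[252]; /* generic MCDI response buffer */
    size_t outlen;
    int rc;

    /* Start the eye plot on the requested lane (lane must carry signal). */
    memset(inbuf, 0, sizeof(inbuf));
    inbuf[MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST] = MC_CMD_KR_TUNE_IN_START_EYE_PLOT;
    mcdi_put_dword(inbuf, MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST, lane);
    rc = mcdi_rpc(MC_CMD_KR_TUNE, inbuf, sizeof(inbuf), NULL, 0, NULL);
    if (rc != 0)
        return rc;

    /* Poll for rows of BER data until no more data is returned. */
    for (;;) {
        uint8_t pollbuf[MC_CMD_KR_TUNE_IN_LEN(0)];

        memset(pollbuf, 0, sizeof(pollbuf));
        pollbuf[MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST] =
            MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT;
        outlen = 0;
        rc = mcdi_rpc(MC_CMD_KR_TUNE, pollbuf, sizeof(pollbuf),
                      outbuf, sizeof(outbuf), &outlen);
        if (rc != 0)
            return rc;
        if (outlen == 0)
            break; /* no more data: the plot is complete */
        /* ... hand the row of samples in outbuf[0..outlen) to the caller */
    }
    return 0;
}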
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15, Huntington) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude (Huntington, Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control (Huntington) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
+#define MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
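+
+/* Illustrative sketch (not part of the generated MCDI definitions above):
+ * decoding one 32-bit PARAM word returned by MC_CMD_PCIE_TUNE_RXEQ_GET.
+ * This assumes the usual MCDI conventions that the payload is little-endian
+ * and that the _LBN/_WIDTH values are bit positions within the containing
+ * dword; the helper names are hypothetical.
+ */
+#include <stdint.h>
+#include <stdio.h>
+
+static inline uint32_t mcdi_field(uint32_t dword, unsigned int lbn,
+				  unsigned int width)
+{
+	return (dword >> lbn) & ((1u << width) - 1u);
+}
+
+static void pcie_rxeq_print_param(uint32_t param)
+{
+	uint32_t id = mcdi_field(param,
+			MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN,
+			MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH);
+	uint32_t lane = mcdi_field(param,
+			MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN,
+			MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH);
+	uint32_t cur = mcdi_field(param,
+			MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN,
+			MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH);
+
+	/* e.g. id == MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST for CTLE Boost */
+	printf("param id=%u lane=%u current=%u\n", id, lane, cur);
+}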
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
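+
+/* Illustrative sketch (not part of the generated definitions): the polling
+ * pattern described for MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT - start the eye
+ * plot on one lane, then poll until a zero-length response indicates no more
+ * BER data. mcdi_rpc() is a hypothetical transport stand-in for whatever
+ * MCDI layer the caller actually uses; payloads are assumed little-endian.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
+		    uint8_t *resp, size_t resp_max, size_t *resp_len);
+
+static int pcie_tune_eye_plot(uint32_t lane)
+{
+	uint8_t req[MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN] = { 0 };
+	uint8_t resp[MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX];
+	size_t resp_len;
+	int rc;
+
+	req[MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST] =
+		MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT;
+	memcpy(&req[MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST],
+	       &lane, sizeof(lane));
+	rc = mcdi_rpc(MC_CMD_PCIE_TUNE, req, sizeof(req), NULL, 0, NULL);
+	if (rc != 0)
+		return rc;
+
+	do {
+		uint8_t poll[MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN] = { 0 };
+
+		poll[MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST] =
+			MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT;
+		rc = mcdi_rpc(MC_CMD_PCIE_TUNE, poll, sizeof(poll),
+			      resp, sizeof(resp), &resp_len);
+		if (rc != 0)
+			return rc;
+		/* Each response carries up to SAMPLES_MAXNUM 16-bit samples;
+		 * process them here. A zero-length response ends the plot.
+		 */
+	} while (resp_len > 0);
+
+	return 0;
+}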
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
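+
+/* Illustrative sketch (not part of the generated definitions): multi-dword
+ * fields such as LICENSED_APPS are exposed as _LO/_HI 32-bit halves at
+ * consecutive offsets. Assuming a little-endian MCDI payload, the 64-bit
+ * bitmask can be reassembled as below; the helper name is hypothetical.
+ */
+#include <stdint.h>
+#include <string.h>
+
+static uint64_t licensing_v3_out_licensed_apps(const uint8_t *resp)
+{
+	uint32_t lo, hi;
+
+	memcpy(&lo, &resp[MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST],
+	       sizeof(lo));
+	memcpy(&hi, &resp[MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST],
+	       sizeof(hi));
+	return ((uint64_t)hi << 32) | lo;
+}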
+
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+/* enum: validate application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+/* result specific to this particular operation */
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+/* validation challenge */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+/* validation response */
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+/* the type of operation requested */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+/* flag */
+#define MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to challenge in the form of ECDSA signature consisting
+ * of two 384-bit integers, r and s, in big-endian order. The signature signs a
+ * SHA-384 digest of a message constructed from the concatenation of the input
+ * message and the remaining fields of this output message, e.g. challenge[48
+ * bytes] ... expiry_time[4 bytes] ...
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless of which function is calling; effectively
+ * this is the PF0 base MAC address)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of the v-adaptor associated with the client. If no such
+ * v-adaptor exists, then the field is filled with 0xFF.
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
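+
+/* Illustrative sketch (not part of the generated definitions): pulling the
+ * individual fields out of an MC_CMD_LICENSED_V3_VALIDATE_APP response using
+ * the offsets and lengths above. The signature check itself (ECDSA over a
+ * SHA-384 digest, as described in the RESPONSE comment) is left to the
+ * caller's crypto library. Assumes a little-endian MCDI payload; the struct
+ * and function names are hypothetical.
+ */
+#include <stdint.h>
+#include <string.h>
+
+struct v3_validate_app_result {
+	uint8_t  response[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN];
+	uint32_t expiry_time;
+	uint32_t expiry_units;	/* _EXPIRY_UNIT_ACC or _EXPIRY_UNIT_DAYS */
+	uint8_t  base_macaddr[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN];
+	uint8_t  vadaptor_macaddr[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN];
+};
+
+static void v3_validate_app_parse(const uint8_t *resp,
+				  struct v3_validate_app_result *out)
+{
+	memcpy(out->response,
+	       &resp[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST],
+	       sizeof(out->response));
+	memcpy(&out->expiry_time,
+	       &resp[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST],
+	       sizeof(out->expiry_time));
+	memcpy(&out->expiry_units,
+	       &resp[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST],
+	       sizeof(out->expiry_units));
+	memcpy(out->base_macaddr,
+	       &resp[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST],
+	       sizeof(out->base_macaddr));
+	memcpy(out->vadaptor_macaddr,
+	       &resp[MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST],
+	       sizeof(out->vadaptor_macaddr));
+}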
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive an MC reboot, but will be
+ * erased when the adapter is power cycled.
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+/* ECDSA license and signature */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+/* enum: finished validating and installing license */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
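+
+/* Illustrative sketch (not part of the generated definitions): the
+ * asynchronous pattern described above - issue a _SET, then poll with
+ * _STATUS until the MC reports OK or ERROR. mcdi_rpc() is a hypothetical
+ * transport stand-in and the payload is assumed little-endian.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
+		    uint8_t *resp, size_t resp_max, size_t *resp_len);
+
+static int licensing_v3_set_temporary(const uint8_t *license /* 160 bytes */)
+{
+	uint8_t req[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN] = { 0 };
+	uint8_t poll[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN] = { 0 };
+	uint8_t resp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN];
+	uint32_t op, status;
+	int rc;
+
+	op = MC_CMD_LICENSING_V3_TEMPORARY_SET;
+	memcpy(&req[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST], &op, 4);
+	memcpy(&req[MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST],
+	       license, MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN);
+	rc = mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, req, sizeof(req),
+		      NULL, 0, NULL);
+	if (rc != 0)
+		return rc;
+
+	op = MC_CMD_LICENSING_V3_TEMPORARY_STATUS;
+	memcpy(&poll[MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST], &op, 4);
+	do {
+		rc = mcdi_rpc(MC_CMD_LICENSING_V3_TEMPORARY, poll,
+			      sizeof(poll), resp, sizeof(resp), NULL);
+		if (rc != 0)
+			return rc;
+		memcpy(&status,
+		       &resp[MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST],
+		       sizeof(status));
+	} while (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);
+
+	return (status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK) ? 0 : -1;
+}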
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure RX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
+
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current RX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
+
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+/* the type of configuration setting to change */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
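+
+/* Illustrative sketch (not part of the generated definitions): setting the
+ * per-TXQ multicast UDP destination lookup enable described above, i.e.
+ * TYPE = TXQ_MCAST_UDP_DST_LOOKUP_EN, ENTITY = a queue handle and a single
+ * boolean VALUE. Assumes little-endian MCDI payloads; mcdi_rpc() is a
+ * hypothetical transport stand-in.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
+		    uint8_t *resp, size_t resp_max, size_t *resp_len);
+
+static int txq_mcast_udp_dst_lookup_set(uint32_t txq_handle, int enable)
+{
+	uint8_t req[MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1)] = { 0 };
+	uint32_t type =
+		MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN;
+	uint32_t value = enable ? 1 : 0;
+
+	memcpy(&req[MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST], &type, 4);
+	memcpy(&req[MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST],
+	       &txq_handle, 4);
+	memcpy(&req[MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST], &value, 4);
+
+	return mcdi_rpc(MC_CMD_SET_PARSER_DISP_CONFIG, req, sizeof(req),
+			NULL, 0, NULL);
+}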
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+/* enum: receive to just the specified queue */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+/* receive mode */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+/* enum: receiving to just the specified queue */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+/* The maximum number of VFs the device can expose in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*) */
+#define MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+/* Default (canonical) board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+/* Current board mode */
+#define MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define MC_CMD_READ_ATB_IN_LEN 16
+#define MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define MC_CMD_READ_ATB_OUT_LEN 4
+#define MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+/* enum: Bug 17230 work around. */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
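+
+/* Illustrative sketch (not part of the generated definitions): checking
+ * whether a particular workaround is implemented and currently enabled from
+ * the two bitmask words returned by MC_CMD_GET_WORKAROUNDS. Assumes a
+ * little-endian MCDI payload; mcdi_rpc() is a hypothetical transport
+ * stand-in.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
+		    uint8_t *resp, size_t resp_max, size_t *resp_len);
+
+static bool workaround_bug35388_enabled(void)
+{
+	uint8_t resp[MC_CMD_GET_WORKAROUNDS_OUT_LEN];
+	uint32_t implemented, enabled;
+
+	if (mcdi_rpc(MC_CMD_GET_WORKAROUNDS, NULL, 0,
+		     resp, sizeof(resp), NULL) != 0)
+		return false;
+
+	memcpy(&implemented,
+	       &resp[MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST], 4);
+	memcpy(&enabled, &resp[MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST], 4);
+
+	return (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) &&
+	       (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388);
+}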
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000,
+ * VF 1,3 = 0x00030001
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the function to set the TX packets' source MAC address to an
+ * arbitrary MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, always all the privileges are reported. */
+#define MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
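+
+/* Illustrative sketch (not part of the generated definitions): reading the
+ * privilege mask of a function without modifying it. The FUNCTION word packs
+ * PF in bits 0-15 and VF in bits 16-31 (so "VF 1,3" encodes as 0x00030001,
+ * per the comment above; use VF_NULL for the VF field when targeting a PF),
+ * and leaving DO_CHANGE clear in NEW_MASK keeps the command read-only.
+ * Assumes little-endian MCDI payloads; mcdi_rpc() is a hypothetical
+ * transport stand-in.
+ */
+#include <stdint.h>
+#include <string.h>
+
+extern int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
+		    uint8_t *resp, size_t resp_max, size_t *resp_len);
+
+static int privilege_mask_read(uint16_t pf, uint16_t vf, uint32_t *maskp)
+{
+	uint8_t req[MC_CMD_PRIVILEGE_MASK_IN_LEN] = { 0 };
+	uint8_t resp[MC_CMD_PRIVILEGE_MASK_OUT_LEN];
+	uint32_t function = (uint32_t)pf |
+		((uint32_t)vf << MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN);
+	int rc;
+
+	memcpy(&req[MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST], &function, 4);
+	/* NEW_MASK stays 0: DO_CHANGE (the MSB) is clear, so nothing is set */
+
+	rc = mcdi_rpc(MC_CMD_PRIVILEGE_MASK, req, sizeof(req),
+		      resp, sizeof(resp), NULL);
+	if (rc != 0)
+		return rc;
+
+	memcpy(maskp, &resp[MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST], 4);
+	return 0;
+}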
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set; must be a VF,
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+/* Maximum acceptable snapshot length. */
+#define MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+/* Checksum of data after logical OR of pairs in area 0 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+/* Total number of mismatched bits between pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+/* Checksum of data after logical OR of pairs in area 1 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+/* Total number of mismatched bits between pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+/* Checksum of data after logical OR of pairs in area 2 */
+#define MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+/* Data */
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+
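+/* Usage sketch (not part of the MCDI definitions): variable-length MCDI
+ * messages describe their payload with LENMIN/LENMAX/LEN(num) plus per-field
+ * _MINNUM/_MAXNUM macros. For example, a read of 64 bytes uses the fixed
+ * 8-byte request and expects a 64-byte response (illustrative only):
+ *
+ *     size_t req_len = MC_CMD_XPM_READ_BYTES_IN_LEN;
+ *     size_t rsp_len = MC_CMD_XPM_READ_BYTES_OUT_LEN(64);
+ *     assert(rsp_len <= MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
+ */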
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+/* Start address (byte) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+/* Data */
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+/* Sector size */
+#define MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+/* Sector type */
+#define MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+/* Sector data */
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+/* Total number of bad (non-blank) locations */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+/* Addresses of bad locations (may be less than BAD_COUNT, if all cannot fit
+ * into MCDI response)
+ */
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+/* Count (bytes) */
+#define MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match, otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_RULE
+ * Set blacklist and/or whitelist action for particular match criteria.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SET_SECURITY_RULE 0x10f
+#undef MC_CMD_0x10f_PRIVILEGE_CTG
+
+#define MC_CMD_0x10f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_RULE_IN msgrequest */
+#define MC_CMD_SET_SECURITY_RULE_IN_LEN 92
+/* fields to include in match criteria */
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_FIELDS_OFST 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN 0
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_LBN 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_IP_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_LBN 2
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN 3
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_LBN 4
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_MAC_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_LBN 5
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_LBN 10
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_PHYSICAL_PORT_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_LBN 11
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_RESERVED_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_LBN 12
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_SUBNET_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_LBN 13
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORTRANGE_ID_WIDTH 1
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_LBN 14
+#define MC_CMD_SET_SECURITY_RULE_IN_MATCH_LOCAL_PORTRANGE_ID_WIDTH 1
+/* remote MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_MAC_LEN 6
+/* remote port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_OFST 10
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORT_LEN 2
+/* local MAC address to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_OFST 12
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_MAC_LEN 6
+/* local port to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_OFST 18
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_OFST 20
+#define MC_CMD_SET_SECURITY_RULE_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_OFST 22
+#define MC_CMD_SET_SECURITY_RULE_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_OFST 24
+#define MC_CMD_SET_SECURITY_RULE_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_OFST 26
+#define MC_CMD_SET_SECURITY_RULE_IN_IP_PROTO_LEN 2
+/* Physical port to match (as little-endian 32-bit value) */
+#define MC_CMD_SET_SECURITY_RULE_IN_PHYSICAL_PORT_OFST 28
+/* Reserved; set to 0 */
+#define MC_CMD_SET_SECURITY_RULE_IN_RESERVED_OFST 32
+/* remote IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_OFST 36
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_IP_LEN 16
+/* local IP address to match (as bytes in network order; set last 12 bytes to 0
+ * for IPv4 address)
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_OFST 52
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_IP_LEN 16
+/* remote subnet ID to match (as little-endian 32-bit value); note that remote
+ * subnets are matched by mapping the remote IP address to a "subnet ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_SUBNET_MAP_SET_NODE appropriately
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_SUBNET_ID_OFST 68
+/* remote portrange ID to match (as little-endian 32-bit value); note that
+ * remote port ranges are matched by mapping the remote port to a "portrange
+ * ID" via a data structure which must already have been configured using
+ * MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_REMOTE_PORTRANGE_ID_OFST 72
+/* local portrange ID to match (as little-endian 32-bit value); note that local
+ * port ranges are matched by mapping the local port to a "portrange ID" via a
+ * data structure which must already have been configured using
+ * MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_LOCAL_PORTRANGE_ID_OFST 76
+/* set the action for transmitted packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_OFST 80
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current TX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_TX_ACTION_UNCHANGED 0xffffffff
+/* set the action for received packets matching this rule */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_OFST 84
+/* enum: make no decision */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_NONE 0x0
+/* enum: decide to accept the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_WHITELIST 0x1
+/* enum: decide to drop the packet */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_BLACKLIST 0x2
+/* enum: do not change the current RX action */
+#define MC_CMD_SET_SECURITY_RULE_IN_RX_ACTION_UNCHANGED 0xffffffff
+/* counter ID to associate with this rule; IDs are allocated using
+ * MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_OFST 88
+/* enum: special value for the null counter ID */
+#define MC_CMD_SET_SECURITY_RULE_IN_COUNTER_ID_NONE 0x0
+
+/* MC_CMD_SET_SECURITY_RULE_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LEN 28
+/* new reference count for uses of counter ID */
+#define MC_CMD_SET_SECURITY_RULE_OUT_COUNTER_REFCNT_OFST 0
+/* constructed match bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_OFST 4
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_MATCH_BITS_LEN 12
+/* constructed discriminator bits for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_DISCRIMINATOR_OFST 16
+/* base location for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_BASE_OFST 20
+/* step for probes for this rule (as a tracing aid only) */
+#define MC_CMD_SET_SECURITY_RULE_OUT_LUE_PROBE_STEP_OFST 24
+
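+/* Usage sketch (not part of the MCDI definitions): MATCH_FIELDS is a bitmask
+ * assembled from the _LBN/_WIDTH pairs above. A rule matching only on remote
+ * IP and remote port would set (illustrative only):
+ *
+ *     uint32_t match_fields =
+ *         (1u << MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_IP_LBN) |
+ *         (1u << MC_CMD_SET_SECURITY_RULE_IN_MATCH_REMOTE_PORT_LBN);
+ */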
+
+/***********************************/
+/* MC_CMD_RESET_SECURITY_RULES
+ * Reset all blacklist and whitelist actions for a particular physical port, or
+ * all ports. (Medford-only; for use by SolarSecure apps, not directly by
+ * drivers. See SF-114946-SW.) NOTE - this message definition is provisional.
+ * It has not yet been used in any released code and may change during
+ * development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_RESET_SECURITY_RULES 0x110
+#undef MC_CMD_0x110_PRIVILEGE_CTG
+
+#define MC_CMD_0x110_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RESET_SECURITY_RULES_IN msgrequest */
+#define MC_CMD_RESET_SECURITY_RULES_IN_LEN 4
+/* index of physical port to reset (or ALL_PHYSICAL_PORTS to reset all) */
+#define MC_CMD_RESET_SECURITY_RULES_IN_PHYSICAL_PORT_OFST 0
+/* enum: special value to reset all physical ports */
+#define MC_CMD_RESET_SECURITY_RULES_IN_ALL_PHYSICAL_PORTS 0xffffffff
+
+/* MC_CMD_RESET_SECURITY_RULES_OUT msgresponse */
+#define MC_CMD_RESET_SECURITY_RULES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SECURITY_RULESET_VERSION
+ * Return a large hash value representing a "version" of the complete set of
+ * currently active blacklist / whitelist rules and associated data structures.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION 0x111
+#undef MC_CMD_0x111_PRIVILEGE_CTG
+
+#define MC_CMD_0x111_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_IN msgrequest */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_SECURITY_RULESET_VERSION_OUT msgresponse */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMIN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LENMAX 252
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_LEN(num) (0+1*(num))
+/* Opaque hash value; length may vary depending on the hash scheme used */
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_OFST 0
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_LEN 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MINNUM 1
+#define MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION_MAXNUM 252
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC
+ * Allocate counters for use with blacklist / whitelist rules. (Medford-only;
+ * for use by SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC 0x112
+#undef MC_CMD_0x112_PRIVILEGE_CTG
+
+#define MC_CMD_0x112_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_LEN 4
+/* the number of new counter IDs to request */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_IN_NUM_COUNTERS_OFST 0
+
+/* MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_LEN(num) (4+4*(num))
+/* the number of new counter IDs allocated (may be less than the number
+ * requested if resources are unavailable)
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_NUM_COUNTERS_OFST 0
+/* new counter ID(s) */
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_ALLOC_OUT_COUNTER_ID_MAXNUM 62
+
+
+/***********************************/
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE
+ * Free counters previously allocated for use with blacklist / whitelist
+ * rules. (Medford-only; for use by SolarSecure apps, not directly by drivers.
+ * See SF-114946-SW.)
+ * NOTE - this message definition is provisional. It has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE 0x113
+#undef MC_CMD_0x113_PRIVILEGE_CTG
+
+#define MC_CMD_0x113_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_IN msgrequest */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMIN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LENMAX 252
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_LEN(num) (4+4*(num))
+/* the number of counter IDs to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_NUM_COUNTERS_OFST 0
+/* the counter ID(s) to free */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_OFST 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_LEN 4
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MINNUM 0
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_IN_COUNTER_ID_MAXNUM 62
+
+/* MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT msgresponse */
+#define MC_CMD_SECURITY_RULE_COUNTER_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SUBNET_MAP_SET_NODE
+ * Atomically update a trie node in the map of subnets to subnet IDs. The
+ * constants in the descriptions of the fields of this message may be retrieved
+ * by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO. (Medford-
+ * only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE 0x114
+#undef MC_CMD_0x114_PRIVILEGE_CTG
+
+#define MC_CMD_0x114_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_IN msgrequest */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMIN 6
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LENMAX 252
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_LEN(num) (4+2*(num))
+/* node to update in the range 0 .. SUBNET_MAP_NUM_NODES-1 */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_NODE_ID_OFST 0
+/* SUBNET_MAP_NUM_ENTRIES_PER_NODE new entries; each entry is either a pointer
+ * to the next node, expressed as an offset in the trie memory (i.e. node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE), or a leaf value in the range
+ * SUBNET_ID_MIN .. SUBNET_ID_MAX
+ */
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_OFST 4
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_LEN 2
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MINNUM 1
+#define MC_CMD_SUBNET_MAP_SET_NODE_IN_ENTRY_MAXNUM 124
+
+/* MC_CMD_SUBNET_MAP_SET_NODE_OUT msgresponse */
+#define MC_CMD_SUBNET_MAP_SET_NODE_OUT_LEN 0
+
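+/* Usage sketch (not part of the MCDI definitions): a non-leaf ENTRY value is
+ * the offset of the child node in trie memory, i.e. the child node ID
+ * multiplied by SUBNET_MAP_NUM_ENTRIES_PER_NODE, while a leaf carries a
+ * subnet ID in the range SUBNET_ID_MIN .. SUBNET_ID_MAX (constants reported
+ * via GET_SECURITY_RULE_INFO). Illustrative only:
+ *
+ *     uint16_t pointer_entry = child_node_id * num_entries_per_node;
+ *     uint16_t leaf_entry = subnet_id;
+ */
+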
+/* PORTRANGE_TREE_ENTRY structuredef */
+#define PORTRANGE_TREE_ENTRY_LEN 4
+/* key for branch nodes (<= key takes left branch, > key takes right branch),
+ * or magic value for leaf nodes
+ */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_OFST 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY 0xffff /* enum */
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_LBN 0
+#define PORTRANGE_TREE_ENTRY_BRANCH_KEY_WIDTH 16
+/* final portrange ID for leaf nodes (don't care for branch nodes) */
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_OFST 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LEN 2
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN 16
+#define PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_WIDTH 16
+
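+/* Usage sketch (not part of the MCDI definitions): each 32-bit tree entry
+ * carries the branch key in bits 0..15 and the leaf portrange ID in bits
+ * 16..31; a leaf node is marked with the magic key 0xffff. Illustrative only:
+ *
+ *     uint32_t branch_entry = port_key;
+ *     uint32_t leaf_entry = PORTRANGE_TREE_ENTRY_LEAF_NODE_KEY |
+ *         ((uint32_t)portrange_id <<
+ *          PORTRANGE_TREE_ENTRY_LEAF_PORTRANGE_ID_LBN);
+ */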
+
+/***********************************/
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping remote port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE 0x115
+#undef MC_CMD_0x115_PRIVILEGE_CTG
+
+#define MC_CMD_0x115_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_REMOTE_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE
+ * Atomically update the entire tree mapping local port ranges to portrange
+ * IDs. The constants in the descriptions of the fields of this message may be
+ * retrieved by the GET_SECURITY_RULE_INFO op of MC_CMD_GET_PARSER_DISP_INFO.
+ * (Medford-only; for use by SolarSecure apps, not directly by drivers. See
+ * SF-114946-SW.) NOTE - this message definition is provisional. It has not yet
+ * been used in any released code and may change during development. This note
+ * will be removed once it is regarded as stable.
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE 0x116
+#undef MC_CMD_0x116_PRIVILEGE_CTG
+
+#define MC_CMD_0x116_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN msgrequest */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMIN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LENMAX 252
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_LEN(num) (0+4*(num))
+/* PORTRANGE_TREE_NUM_ENTRIES new entries, each laid out as a
+ * PORTRANGE_TREE_ENTRY
+ */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_OFST 0
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_LEN 4
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MINNUM 1
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_IN_ENTRIES_MAXNUM 63
+
+/* MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT msgresponse */
+#define MC_CMD_LOCAL_PORTRANGE_MAP_SET_TREE_OUT_LEN 0
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
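+/* Usage sketch (not part of the MCDI definitions): each entry packs the UDP
+ * port into bits 0..15 and the protocol into bits 16..31, so the IANA VXLAN
+ * port (4789, i.e. 0x12b5) is configured for VXLAN with (illustrative only):
+ *
+ *     uint32_t entry = TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT |
+ *         ((uint32_t)TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN <<
+ *          TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN);
+ */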
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute packets across both RX engines.
+ * Packets are distributed based on a table holding the destination vFIFO. The
+ * table index is a hash of the IPv4 source and destination addresses and the
+ * VLAN priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 16
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+/* The VLAN priority associated to the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+/* The RX engine to which the vFIFO in the table entry will point to */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TSA_BIND
+ * TSAN - TSAC binding communication protocol. Refer to SF-115479-TC for more
+ * information on the binding protocol. This MCDI command is only available
+ * over a TLS secure connection between the TSAN and TSAC, and is not available
+ * to host software. Note - the message definitions that comprise this MCDI
+ * command are deemed provisional. This MCDI command has not yet been used in
+ * any released code and may change during development. This note will be
+ * removed once it is regarded as stable.
+ */
+#define MC_CMD_TSA_BIND 0x119
+#undef MC_CMD_0x119_PRIVILEGE_CTG
+
+#define MC_CMD_0x119_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TSA_BIND_IN msgrequest: Protocol operation code */
+#define MC_CMD_TSA_BIND_IN_LEN 4
+#define MC_CMD_TSA_BIND_IN_OP_OFST 0
+/* enum: Retrieve the TSAN ID from a TSAN. TSAN ID is a unique identifier for
+ * the network adapter. More specifically, TSAN ID equals the MAC address of
+ * the network adapter. TSAN ID is used as part of the TSAN authentication
+ * protocol. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_ID 0x1
+/* enum: Get a binding ticket from the TSAN. The binding ticket is used as part
+ * of the binding procedure to authorize the binding of an adapter to a TSAID.
+ * Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_GET_TICKET 0x2
+/* enum: Opcode associated with the propagation of a private key that TSAN uses
+ * as part of post-binding authentication procedure. More specifically, TSAN
+ * uses this key for a signing operation. TSAC uses the counterpart public key
+ * to verify the signature. Note - The post-binding authentication occurs when
+ * the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer to
+ * SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_OP_SET_KEY 0x3
+/* enum: Request an unbinding operation. Note - TSAN clears the binding ticket
+ * from the NVRAM section.
+ */
+#define MC_CMD_TSA_BIND_OP_UNBIND 0x4
+
+/* MC_CMD_TSA_BIND_IN_GET_ID msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_ID_LEN 20
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_ID_OP_OFST 0
+/* Cryptographic nonce that TSAC generates and sends to TSAN. TSAC generates
+ * the nonce every time as part of the TSAN post-binding authentication
+ * procedure when the TSAN-TSAC connection terminates and TSAN needs to
+ * reconnect to the TSAC. Refer to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_OFST 4
+#define MC_CMD_TSA_BIND_IN_GET_ID_NONCE_LEN 16
+
+/* MC_CMD_TSA_BIND_IN_GET_TICKET msgrequest */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_LEN 4
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_GET_TICKET_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_IN_SET_KEY msgrequest */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMIN 5
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LENMAX 252
+#define MC_CMD_TSA_BIND_IN_SET_KEY_LEN(num) (4+1*(num))
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_OP_OFST 0
+/* This data blob contains the private key generated by the TSAC. TSAN uses
+ * this key for a signing operation. Note- This private key is used in
+ * conjunction with the post-binding TSAN authentication procedure that occurs
+ * when the TSAN-TSAC connection terminates and TSAN tries to reconnect. Refer
+ * to SF-114946-SW for more information.
+ */
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_OFST 4
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_LEN 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MINNUM 1
+#define MC_CMD_TSA_BIND_IN_SET_KEY_DATKEY_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_IN_UNBIND msgrequest: Asks for the un-binding procedure */
+#define MC_CMD_TSA_BIND_IN_UNBIND_LEN 10
+/* The operation requested. */
+#define MC_CMD_TSA_BIND_IN_UNBIND_OP_OFST 0
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_OFST 4
+#define MC_CMD_TSA_BIND_IN_UNBIND_TSANID_LEN 6
+
+/* MC_CMD_TSA_BIND_OUT_GET_ID msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMIN 15
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_ID_LEN(num) (14+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_OP_OFST 0
+/* Rules engine type. Note- The rules engine type allows TSAC to further
+ * identify the connected endpoint (e.g. TSAN, NIC Emulator) type and take the
+ * proper action accordingly. As an example, TSAC uses the rules engine type to
+ * select the SF key that differs in the case of TSAN vs. NIC Emulator.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_OFST 4
+/* enum: Hardware rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_TSAN 0x1
+/* enum: Nic emulator rules engine. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_NEMU 0x2
+/* enum: SSFE. */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_RULE_ENGINE_SSFE 0x3
+/* TSAN unique identifier for the network adapter */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_OFST 8
+#define MC_CMD_TSA_BIND_OUT_GET_ID_TSANID_LEN 6
+/* The signature data blob. The signature is computed against the message
+ * formed by TSAN ID concatenated with the NONCE value. Refer to SF-115479-TC
+ * for more information also in respect to the private keys that are used to
+ * sign the message based on TSAN pre/post-binding authentication procedure.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_OFST 14
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_ID_SIG_MAXNUM 238
+
+/* MC_CMD_TSA_BIND_OUT_GET_TICKET msgresponse */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMIN 5
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LENMAX 252
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_LEN(num) (4+1*(num))
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_OP_OFST 0
+/* The ticket represents the data blob construct that TSAN sends to TSAC as
+ * part of the binding protocol. From the TSAN perspective the ticket is an
+ * opaque construct. For more info refer to SF-115479-TC.
+ */
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_OFST 4
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_LEN 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MINNUM 1
+#define MC_CMD_TSA_BIND_OUT_GET_TICKET_TICKET_MAXNUM 248
+
+/* MC_CMD_TSA_BIND_OUT_SET_KEY msgresponse */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_LEN 4
+/* The operation completion code. */
+#define MC_CMD_TSA_BIND_OUT_SET_KEY_OP_OFST 0
+
+/* MC_CMD_TSA_BIND_OUT_UNBIND msgresponse */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_INFO_OFST 4
+/* enum: Unbind successful. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_OK_UNBOUND 0x0
+/* enum: TSANID mismatch */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_BAD_TSANID 0x1
+/* enum: Unable to remove the binding ticket from persistent storage. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_REMOVE_TICKET 0x2
+/* enum: TSAN is not bound to a binding ticket. */
+#define MC_CMD_TSA_BIND_OUT_UNBIND_ERR_NOT_BOUND 0x3
+
+
+/***********************************/
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE
+ * Manage the persistent NVRAM cache of security rules created with
+ * MC_CMD_SET_SECURITY_RULE. Note that the cache is not automatically updated
+ * as rules are added or removed; the active ruleset must be explicitly
+ * committed to the cache. The cache may also be explicitly invalidated,
+ * without affecting the currently active ruleset. When the cache is valid, it
+ * will be loaded at power on or MC reboot, instead of the default ruleset.
+ * Rollback of the currently active ruleset to the cached version (when it is
+ * valid) is also supported. (Medford-only; for use by SolarSecure apps, not
+ * directly by drivers. See SF-114946-SW.) NOTE - this message definition is
+ * provisional. It has not yet been used in any released code and may change
+ * during development. This note will be removed once it is regarded as stable.
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE 0x11a
+#undef MC_CMD_0x11a_PRIVILEGE_CTG
+
+#define MC_CMD_0x11a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN msgrequest */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_LEN 4
+/* the operation to perform */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_OFST 0
+/* enum: reports the ruleset version that is cached in persistent storage but
+ * performs no other action
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_GET_CACHED_VERSION 0x0
+/* enum: rolls back the active state to the cached version. (May fail with
+ * ENOENT if there is no valid cached version.)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_ROLLBACK 0x1
+/* enum: commits the active state to the persistent cache */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_COMMIT 0x2
+/* enum: invalidates the persistent cache without affecting the active state */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_IN_OP_INVALIDATE 0x3
+
+/* MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT msgresponse */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMIN 5
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LENMAX 252
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_LEN(num) (4+1*(num))
+/* indicates whether the persistent cache is valid (after completion of the
+ * requested operation in the case of rollback, commit, or invalidate)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_OFST 0
+/* enum: persistent cache is invalid (the VERSION field will be empty in this
+ * case)
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_INVALID 0x0
+/* enum: persistent cache is valid */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_STATE_VALID 0x1
+/* cached ruleset version (after completion of the requested operation, in the
+ * case of rollback, commit, or invalidate) as an opaque hash value in the same
+ * form as MC_CMD_GET_SECURITY_RULESET_VERSION_OUT_VERSION
+ */
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_OFST 4
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_LEN 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MINNUM 1
+#define MC_CMD_MANAGE_SECURITY_RULESET_CACHE_OUT_VERSION_MAXNUM 248
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PRIVATE_APPEND
+ * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
+ * if the tag is already present.
+ */
+#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+#undef MC_CMD_0x11c_PRIVILEGE_CTG
+
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+/* The tag to be appended */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+/* The length of the data */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+/* The data to be contained in the TLV structure */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
+#define MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
+#define MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_VERIFY_CONTENTS
+ * Verify that the contents of the XPM memory are correct (Medford only). This
+ * is used during manufacture to check that the XPM memory has been programmed
+ * correctly at ATE.
+ */
+#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+#undef MC_CMD_0x11b_PRIVILEGE_CTG
+
+#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
+/* Data type to be checked */
+#define MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+/* Number of sectors found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+/* Number of bytes found (test builds only) */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+/* Length of signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+/* Signature */
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
+#define MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+
+
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+#undef MC_CMD_0x120_PRIVILEGE_CTG
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+/* Requested value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+/* Requested value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+/* Actual value for timer reload (in nanoseconds) */
+#define MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+
+
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+#undef MC_CMD_0x122_PRIVILEGE_CTG
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
+ * multiple of this step size will be rounded in an implementation defined
+ * manner.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * not a multiple of this step size will be rounded in an implementation
+ * defined manner. This field is only meaningful if the bug35388 workaround is
+ * enabled.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+
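+/* Usage sketch (not part of the MCDI definitions): a timer duration requested
+ * via MC_CMD_SET_EVQ_TMR is rounded up to MCDI_TMR_STEP_NS and truncated to
+ * MCDI_TMR_MAX_NS, so the expected TMR_LOAD_ACT_NS can be estimated as
+ * (illustrative only; step_ns and max_ns are values previously read from
+ * MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT):
+ *
+ *     uint32_t act_ns = ((req_ns + step_ns - 1) / step_ns) * step_ns;
+ *     if (act_ns > max_ns)
+ *         act_ns = max_ns;
+ */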
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP
+ * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
+ * unused switch buffers.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+#undef MC_CMD_0x11d_PRIVILEGE_CTG
+
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+/* Will the common pool be used as TX_vFIFO_ULL (1) */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+/* Number of buffers to reserve for the common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+/* TX datapath to which the Common Pool is connected. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+/* enum: Extracts information from function */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+/* Network port or RX Engine to which the common pool connects. */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+/* enum: Extracts information from function */
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
+/* ID of the common pool allocated */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
+ * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
+ * previously allocated common pools.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+#undef MC_CMD_0x11e_PRIVILEGE_CTG
+
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
+/* Common pool previously allocated to which the new vFIFO will be associated
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+/* Port or RX engine to associate the vFIFO egress */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+/* enum: Extracts information from common pool */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+/* Minimum number of buffers that the pool must have */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+/* enum: Do not check the space available */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+/* Will the vFIFO be used as TX_vFIFO_ULL */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+/* Network priority of the vFIFO, if applicable */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+/* enum: Search for the lowest unused priority */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
+/* Short vFIFO ID */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+/* Network priority of the vFIFO */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+
+
+/***********************************/
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF
+ * This interface clears the configuration of the given vFIFO and leaves it
+ * ready to be re-used.
+ */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+#undef MC_CMD_0x11f_PRIVILEGE_CTG
+
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
+/* Short vFIFO ID */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
+ * This interface clears the configuration of the given common pool and leaves
+ * it ready to be re-used.
+ */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+#undef MC_CMD_0x121_PRIVILEGE_CTG
+
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
+/* Common pool ID given when pool allocated */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_REKEY
+ * This request causes the NIC to generate a new per-NIC key and program it
+ * into the write-once memory. During the process all flash partitions that are
+ * protected with a CMAC are verified with the old per-NIC key and then signed
+ * with the new per-NIC key. If the NIC has already reached its rekey limit,
+ * the REKEY op will return MC_CMD_ERR_ERANGE. The REKEY op may block until
+ * completion or it may return 0 and continue processing, therefore the caller
+ * must poll at least once to confirm that the rekeying has completed. The POLL
+ * operation returns MC_CMD_ERR_EBUSY if the rekey process is still running;
+ * otherwise it will return the result of the last completed rekey operation,
+ * or 0 if there has not been a previous rekey.
+ */
+#define MC_CMD_REKEY 0x123
+#undef MC_CMD_0x123_PRIVILEGE_CTG
+
+#define MC_CMD_0x123_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REKEY_IN msgrequest */
+#define MC_CMD_REKEY_IN_LEN 4
+/* the type of operation requested */
+#define MC_CMD_REKEY_IN_OP_OFST 0
+/* enum: Start the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_REKEY 0x0
+/* enum: Poll for completion of the rekeying operation */
+#define MC_CMD_REKEY_IN_OP_POLL 0x1
+
+/* MC_CMD_REKEY_OUT msgresponse */
+#define MC_CMD_REKEY_OUT_LEN 0
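+
+/*
+ * Illustrative sketch of the start/poll sequence described above.  The
+ * mcdi_rpc() transport hook and the MCDI_ERR_STILL_RUNNING return convention
+ * are assumptions made for this example only; a real driver would use its own
+ * MCDI transport and map the firmware's MC_CMD_ERR_EBUSY indication for the
+ * POLL op onto "rekey still in progress".
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+
+/* Hypothetical transport hook: issue one MCDI command, returning 0 on
+ * success, a negative value on error, or MCDI_ERR_STILL_RUNNING when the
+ * firmware reports the rekey as still running.
+ */
+#define MCDI_ERR_STILL_RUNNING	1
+extern int mcdi_rpc(unsigned int cmd, const uint32_t *in, unsigned int inlen);
+
+static int rekey_and_wait(void)
+{
+	uint32_t req[MC_CMD_REKEY_IN_LEN / 4];
+	int rc;
+
+	/* Kick off the rekey; this may return before the operation is done. */
+	req[MC_CMD_REKEY_IN_OP_OFST / 4] = MC_CMD_REKEY_IN_OP_REKEY;
+	rc = mcdi_rpc(MC_CMD_REKEY, req, MC_CMD_REKEY_IN_LEN);
+	if (rc < 0)
+		return rc;
+
+	/* Poll until the firmware stops reporting the rekey as in progress. */
+	do {
+		req[MC_CMD_REKEY_IN_OP_OFST / 4] = MC_CMD_REKEY_IN_OP_POLL;
+		rc = mcdi_rpc(MC_CMD_REKEY, req, MC_CMD_REKEY_IN_LEN);
+	} while (rc == MCDI_ERR_STILL_RUNNING);
+
+	return rc;	/* result of the last completed rekey, or 0 */
+}
+#endif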
+
+
+/***********************************/
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
+ * This interface allows the host to find out how many common pool buffers are
+ * not yet assigned.
+ */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+#undef MC_CMD_0x124_PRIVILEGE_CTG
+
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
+/* Available buffers for the ENG to NET vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+
+
+/***********************************/
+/* MC_CMD_SET_SECURITY_FUSES
+ * Change the security level of the adapter by setting bits in the write-once
+ * memory. The firmware maps each flag in the message to a set of one or more
+ * hardware-defined or software-defined bits and sets these bits in the write-
+ * once memory. For Medford the hardware-defined bits are defined in
+ * SF-112079-PS 5.3, the software-defined bits are defined in xpm.h. Returns 0
+ * if all of the required bits were set and returns MC_CMD_ERR_EIO if any of
+ * the required bits were not set.
+ */
+#define MC_CMD_SET_SECURITY_FUSES 0x126
+#undef MC_CMD_0x126_PRIVILEGE_CTG
+
+#define MC_CMD_0x126_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SECURITY_FUSES_IN msgrequest */
+#define MC_CMD_SET_SECURITY_FUSES_IN_LEN 4
+/* Flags specifying what type of security features are being set */
+#define MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN 0
+#define MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_WIDTH 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN 1
+#define MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_WIDTH 1
+
+/* MC_CMD_SET_SECURITY_FUSES_OUT msgresponse */
+#define MC_CMD_SET_SECURITY_FUSES_OUT_LEN 0
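+
+/*
+ * Illustrative sketch of composing the FLAGS dword from the LBN definitions
+ * above; both fields used here are single-bit (WIDTH 1), so each flag is just
+ * a 1 shifted to its LBN.  The build_security_fuse_flags() helper name is an
+ * assumption made for this example.
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+
+static uint32_t build_security_fuse_flags(void)
+{
+	uint32_t flags = 0;
+
+	/* Request secure boot enforcement... */
+	flags |= 1u << MC_CMD_SET_SECURITY_FUSES_IN_SECURE_BOOT_LBN;
+	/* ...and rejection of test-signed firmware images. */
+	flags |= 1u << MC_CMD_SET_SECURITY_FUSES_IN_REJECT_TEST_SIGNED_LBN;
+
+	return flags;	/* written at MC_CMD_SET_SECURITY_FUSES_IN_FLAGS_OFST */
+}
+#endif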
+
+#endif /* _SIENA_MC_DRIVER_PCOL_H */
+/*! \cidoxg_end */
diff --git a/drivers/net/sfc/base/efx_regs_pci.h b/drivers/net/sfc/base/efx_regs_pci.h
new file mode 100644
index 00000000..f90f9565
--- /dev/null
+++ b/drivers/net/sfc/base/efx_regs_pci.h
@@ -0,0 +1,2356 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_EFX_REGS_PCI_H
+#define _SYS_EFX_REGS_PCI_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * PC_VEND_ID_REG(16bit):
+ * Vendor ID register
+ */
+
+#define PCR_AZ_VEND_ID_REG 0x00000000
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VEND_ID_LBN 0
+#define PCRF_AZ_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_DEV_ID_REG(16bit):
+ * Device ID register
+ */
+
+#define PCR_AZ_DEV_ID_REG 0x00000002
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DEV_ID_LBN 0
+#define PCRF_AZ_DEV_ID_WIDTH 16
+
+
+/*
+ * PC_CMD_REG(16bit):
+ * Command register
+ */
+
+#define PCR_AZ_CMD_REG 0x00000004
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INTX_DIS_LBN 10
+#define PCRF_AZ_INTX_DIS_WIDTH 1
+#define PCRF_AZ_FB2B_EN_LBN 9
+#define PCRF_AZ_FB2B_EN_WIDTH 1
+#define PCRF_AZ_SERR_EN_LBN 8
+#define PCRF_AZ_SERR_EN_WIDTH 1
+#define PCRF_AZ_IDSEL_CTL_LBN 7
+#define PCRF_AZ_IDSEL_CTL_WIDTH 1
+#define PCRF_AZ_PERR_EN_LBN 6
+#define PCRF_AZ_PERR_EN_WIDTH 1
+#define PCRF_AZ_VGA_PAL_SNP_LBN 5
+#define PCRF_AZ_VGA_PAL_SNP_WIDTH 1
+#define PCRF_AZ_MWI_EN_LBN 4
+#define PCRF_AZ_MWI_EN_WIDTH 1
+#define PCRF_AZ_SPEC_CYC_LBN 3
+#define PCRF_AZ_SPEC_CYC_WIDTH 1
+#define PCRF_AZ_MST_EN_LBN 2
+#define PCRF_AZ_MST_EN_WIDTH 1
+#define PCRF_AZ_MEM_EN_LBN 1
+#define PCRF_AZ_MEM_EN_WIDTH 1
+#define PCRF_AZ_IO_EN_LBN 0
+#define PCRF_AZ_IO_EN_WIDTH 1
+
+
+/*
+ * PC_STAT_REG(16bit):
+ * Status register
+ */
+
+#define PCR_AZ_STAT_REG 0x00000006
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_DET_PERR_LBN 15
+#define PCRF_AZ_DET_PERR_WIDTH 1
+#define PCRF_AZ_SIG_SERR_LBN 14
+#define PCRF_AZ_SIG_SERR_WIDTH 1
+#define PCRF_AZ_GOT_MABRT_LBN 13
+#define PCRF_AZ_GOT_MABRT_WIDTH 1
+#define PCRF_AZ_GOT_TABRT_LBN 12
+#define PCRF_AZ_GOT_TABRT_WIDTH 1
+#define PCRF_AZ_SIG_TABRT_LBN 11
+#define PCRF_AZ_SIG_TABRT_WIDTH 1
+#define PCRF_AZ_DEVSEL_TIM_LBN 9
+#define PCRF_AZ_DEVSEL_TIM_WIDTH 2
+#define PCRF_AZ_MDAT_PERR_LBN 8
+#define PCRF_AZ_MDAT_PERR_WIDTH 1
+#define PCRF_AZ_FB2B_CAP_LBN 7
+#define PCRF_AZ_FB2B_CAP_WIDTH 1
+#define PCRF_AZ_66MHZ_CAP_LBN 5
+#define PCRF_AZ_66MHZ_CAP_WIDTH 1
+#define PCRF_AZ_CAP_LIST_LBN 4
+#define PCRF_AZ_CAP_LIST_WIDTH 1
+#define PCRF_AZ_INTX_STAT_LBN 3
+#define PCRF_AZ_INTX_STAT_WIDTH 1
+
+
+/*
+ * PC_REV_ID_REG(8bit):
+ * Class code & revision ID register
+ */
+
+#define PCR_AZ_REV_ID_REG 0x00000008
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_REV_ID_LBN 0
+#define PCRF_AZ_REV_ID_WIDTH 8
+
+
+/*
+ * PC_CC_REG(24bit):
+ * Class code register
+ */
+
+#define PCR_AZ_CC_REG 0x00000009
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BASE_CC_LBN 16
+#define PCRF_AZ_BASE_CC_WIDTH 8
+#define PCRF_AZ_SUB_CC_LBN 8
+#define PCRF_AZ_SUB_CC_WIDTH 8
+#define PCRF_AZ_PROG_IF_LBN 0
+#define PCRF_AZ_PROG_IF_WIDTH 8
+
+
+/*
+ * PC_CACHE_LSIZE_REG(8bit):
+ * Cache line size
+ */
+
+#define PCR_AZ_CACHE_LSIZE_REG 0x0000000c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CACHE_LSIZE_LBN 0
+#define PCRF_AZ_CACHE_LSIZE_WIDTH 8
+
+
+/*
+ * PC_MST_LAT_REG(8bit):
+ * Master latency timer register
+ */
+
+#define PCR_AZ_MST_LAT_REG 0x0000000d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MST_LAT_LBN 0
+#define PCRF_AZ_MST_LAT_WIDTH 8
+
+
+/*
+ * PC_HDR_TYPE_REG(8bit):
+ * Header type register
+ */
+
+#define PCR_AZ_HDR_TYPE_REG 0x0000000e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MULT_FUNC_LBN 7
+#define PCRF_AZ_MULT_FUNC_WIDTH 1
+#define PCRF_AZ_TYPE_LBN 0
+#define PCRF_AZ_TYPE_WIDTH 7
+
+
+/*
+ * PC_BIST_REG(8bit):
+ * BIST register
+ */
+
+#define PCR_AZ_BIST_REG 0x0000000f
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BIST_LBN 0
+#define PCRF_AZ_BIST_WIDTH 8
+
+
+/*
+ * PC_BAR0_REG(32bit):
+ * Primary function base address register 0
+ */
+
+#define PCR_AZ_BAR0_REG 0x00000010
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR0_LBN 4
+#define PCRF_AZ_BAR0_WIDTH 28
+#define PCRF_AZ_BAR0_PREF_LBN 3
+#define PCRF_AZ_BAR0_PREF_WIDTH 1
+#define PCRF_AZ_BAR0_TYPE_LBN 1
+#define PCRF_AZ_BAR0_TYPE_WIDTH 2
+#define PCRF_AZ_BAR0_IOM_LBN 0
+#define PCRF_AZ_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR1_REG(32bit):
+ * Primary function base address register 1; BAR1 is not implemented, so it is read only.
+ */
+
+#define PCR_DZ_BAR1_REG 0x00000014
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_BAR1_LBN 0
+#define PCRF_DZ_BAR1_WIDTH 32
+
+
+/*
+ * PC_BAR2_LO_REG(32bit):
+ * Primary function base address register 2 low bits
+ */
+
+#define PCR_AZ_BAR2_LO_REG 0x00000018
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_LO_LBN 4
+#define PCRF_AZ_BAR2_LO_WIDTH 28
+#define PCRF_AZ_BAR2_PREF_LBN 3
+#define PCRF_AZ_BAR2_PREF_WIDTH 1
+#define PCRF_AZ_BAR2_TYPE_LBN 1
+#define PCRF_AZ_BAR2_TYPE_WIDTH 2
+#define PCRF_AZ_BAR2_IOM_LBN 0
+#define PCRF_AZ_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR2_HI_REG(32bit):
+ * Primary function base address register 2 high bits
+ */
+
+#define PCR_AZ_BAR2_HI_REG 0x0000001c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_BAR2_HI_LBN 0
+#define PCRF_AZ_BAR2_HI_WIDTH 32
+
+
+/*
+ * PC_BAR4_LO_REG(32bit):
+ * Primary function base address register 4 low bits
+ */
+
+#define PCR_CZ_BAR4_LO_REG 0x00000020
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_LO_LBN 4
+#define PCRF_CZ_BAR4_LO_WIDTH 28
+#define PCRF_CZ_BAR4_PREF_LBN 3
+#define PCRF_CZ_BAR4_PREF_WIDTH 1
+#define PCRF_CZ_BAR4_TYPE_LBN 1
+#define PCRF_CZ_BAR4_TYPE_WIDTH 2
+#define PCRF_CZ_BAR4_IOM_LBN 0
+#define PCRF_CZ_BAR4_IOM_WIDTH 1
+
+
+/*
+ * PC_BAR4_HI_REG(32bit):
+ * Primary function base address register 4 high bits
+ */
+
+#define PCR_CZ_BAR4_HI_REG 0x00000024
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_BAR4_HI_LBN 0
+#define PCRF_CZ_BAR4_HI_WIDTH 32
+
+
+/*
+ * PC_SS_VEND_ID_REG(16bit):
+ * Sub-system vendor ID register
+ */
+
+#define PCR_AZ_SS_VEND_ID_REG 0x0000002c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_VEND_ID_LBN 0
+#define PCRF_AZ_SS_VEND_ID_WIDTH 16
+
+
+/*
+ * PC_SS_ID_REG(16bit):
+ * Sub-system ID register
+ */
+
+#define PCR_AZ_SS_ID_REG 0x0000002e
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SS_ID_LBN 0
+#define PCRF_AZ_SS_ID_WIDTH 16
+
+
+/*
+ * PC_EXPROM_BAR_REG(32bit):
+ * Expansion ROM base address register
+ */
+
+#define PCR_AZ_EXPROM_BAR_REG 0x00000030
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXPROM_BAR_LBN 11
+#define PCRF_AZ_EXPROM_BAR_WIDTH 21
+#define PCRF_AB_EXPROM_MIN_SIZE_LBN 2
+#define PCRF_AB_EXPROM_MIN_SIZE_WIDTH 9
+#define PCRF_CZ_EXPROM_MIN_SIZE_LBN 1
+#define PCRF_CZ_EXPROM_MIN_SIZE_WIDTH 10
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_LBN 1
+#define PCRF_AB_EXPROM_FEATURE_ENABLE_WIDTH 1
+#define PCRF_AZ_EXPROM_EN_LBN 0
+#define PCRF_AZ_EXPROM_EN_WIDTH 1
+
+
+/*
+ * PC_CAP_PTR_REG(8bit):
+ * Capability pointer register
+ */
+
+#define PCR_AZ_CAP_PTR_REG 0x00000034
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_CAP_PTR_LBN 0
+#define PCRF_AZ_CAP_PTR_WIDTH 8
+
+
+/*
+ * PC_INT_LINE_REG(8bit):
+ * Interrupt line register
+ */
+
+#define PCR_AZ_INT_LINE_REG 0x0000003c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_LINE_LBN 0
+#define PCRF_AZ_INT_LINE_WIDTH 8
+
+
+/*
+ * PC_INT_PIN_REG(8bit):
+ * Interrupt pin register
+ */
+
+#define PCR_AZ_INT_PIN_REG 0x0000003d
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_INT_PIN_LBN 0
+#define PCRF_AZ_INT_PIN_WIDTH 8
+#define PCFE_DZ_INTPIN_INTD 4
+#define PCFE_DZ_INTPIN_INTC 3
+#define PCFE_DZ_INTPIN_INTB 2
+#define PCFE_DZ_INTPIN_INTA 1
+
+
+/*
+ * PC_PM_CAP_ID_REG(8bit):
+ * Power management capability ID
+ */
+
+#define PCR_AZ_PM_CAP_ID_REG 0x00000040
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_CAP_ID_LBN 0
+#define PCRF_AZ_PM_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PM_NXT_PTR_REG(8bit):
+ * Power management next item pointer
+ */
+
+#define PCR_AZ_PM_NXT_PTR_REG 0x00000041
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_NXT_PTR_LBN 0
+#define PCRF_AZ_PM_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_PM_CAP_REG(16bit):
+ * Power management capabilities register
+ */
+
+#define PCR_AZ_PM_CAP_REG 0x00000042
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_SUPT_LBN 11
+#define PCRF_AZ_PM_PME_SUPT_WIDTH 5
+#define PCRF_AZ_PM_D2_SUPT_LBN 10
+#define PCRF_AZ_PM_D2_SUPT_WIDTH 1
+#define PCRF_AZ_PM_D1_SUPT_LBN 9
+#define PCRF_AZ_PM_D1_SUPT_WIDTH 1
+#define PCRF_AZ_PM_AUX_CURR_LBN 6
+#define PCRF_AZ_PM_AUX_CURR_WIDTH 3
+#define PCRF_AZ_PM_DSI_LBN 5
+#define PCRF_AZ_PM_DSI_WIDTH 1
+#define PCRF_AZ_PM_PME_CLK_LBN 3
+#define PCRF_AZ_PM_PME_CLK_WIDTH 1
+#define PCRF_AZ_PM_PME_VER_LBN 0
+#define PCRF_AZ_PM_PME_VER_WIDTH 3
+
+
+/*
+ * PC_PM_CS_REG(16bit):
+ * Power management control & status register
+ */
+
+#define PCR_AZ_PM_CS_REG 0x00000044
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PM_PME_STAT_LBN 15
+#define PCRF_AZ_PM_PME_STAT_WIDTH 1
+#define PCRF_AZ_PM_DAT_SCALE_LBN 13
+#define PCRF_AZ_PM_DAT_SCALE_WIDTH 2
+#define PCRF_AZ_PM_DAT_SEL_LBN 9
+#define PCRF_AZ_PM_DAT_SEL_WIDTH 4
+#define PCRF_AZ_PM_PME_EN_LBN 8
+#define PCRF_AZ_PM_PME_EN_WIDTH 1
+#define PCRF_CZ_NO_SOFT_RESET_LBN 3
+#define PCRF_CZ_NO_SOFT_RESET_WIDTH 1
+#define PCRF_AZ_PM_PWR_ST_LBN 0
+#define PCRF_AZ_PM_PWR_ST_WIDTH 2
+
+
+/*
+ * PC_MSI_CAP_ID_REG(8bit):
+ * MSI capability ID
+ */
+
+#define PCR_AZ_MSI_CAP_ID_REG 0x00000050
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_CAP_ID_LBN 0
+#define PCRF_AZ_MSI_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSI_NXT_PTR_REG(8bit):
+ * MSI next item pointer
+ */
+
+#define PCR_AZ_MSI_NXT_PTR_REG 0x00000051
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_NXT_PTR_LBN 0
+#define PCRF_AZ_MSI_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSI_CTL_REG(16bit):
+ * MSI control register
+ */
+
+#define PCR_AZ_MSI_CTL_REG 0x00000052
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_64_EN_LBN 7
+#define PCRF_AZ_MSI_64_EN_WIDTH 1
+#define PCRF_AZ_MSI_MULT_MSG_EN_LBN 4
+#define PCRF_AZ_MSI_MULT_MSG_EN_WIDTH 3
+#define PCRF_AZ_MSI_MULT_MSG_CAP_LBN 1
+#define PCRF_AZ_MSI_MULT_MSG_CAP_WIDTH 3
+#define PCRF_AZ_MSI_EN_LBN 0
+#define PCRF_AZ_MSI_EN_WIDTH 1
+
+
+/*
+ * PC_MSI_ADR_LO_REG(32bit):
+ * MSI low 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_LO_REG 0x00000054
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_LO_LBN 2
+#define PCRF_AZ_MSI_ADR_LO_WIDTH 30
+
+
+/*
+ * PC_MSI_ADR_HI_REG(32bit):
+ * MSI high 32 bits address register
+ */
+
+#define PCR_AZ_MSI_ADR_HI_REG 0x00000058
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_ADR_HI_LBN 0
+#define PCRF_AZ_MSI_ADR_HI_WIDTH 32
+
+
+/*
+ * PC_MSI_DAT_REG(16bit):
+ * MSI data register
+ */
+
+#define PCR_AZ_MSI_DAT_REG 0x0000005c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_MSI_DAT_LBN 0
+#define PCRF_AZ_MSI_DAT_WIDTH 16
+
+
+/*
+ * PC_PCIE_CAP_LIST_REG(16bit):
+ * PCIe capability list register
+ */
+
+#define PCR_AB_PCIE_CAP_LIST_REG 0x00000060
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_LIST_REG 0x00000070
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_NXT_PTR_LBN 8
+#define PCRF_AZ_PCIE_NXT_PTR_WIDTH 8
+#define PCRF_AZ_PCIE_CAP_ID_LBN 0
+#define PCRF_AZ_PCIE_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_PCIE_CAP_REG(16bit):
+ * PCIe capability register
+ */
+
+#define PCR_AB_PCIE_CAP_REG 0x00000062
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_PCIE_CAP_REG 0x00000072
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PCIE_INT_MSG_NUM_LBN 9
+#define PCRF_AZ_PCIE_INT_MSG_NUM_WIDTH 5
+#define PCRF_AZ_PCIE_SLOT_IMP_LBN 8
+#define PCRF_AZ_PCIE_SLOT_IMP_WIDTH 1
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_LBN 4
+#define PCRF_AZ_PCIE_DEV_PORT_TYPE_WIDTH 4
+#define PCRF_AZ_PCIE_CAP_VER_LBN 0
+#define PCRF_AZ_PCIE_CAP_VER_WIDTH 4
+
+
+/*
+ * PC_DEV_CAP_REG(32bit):
+ * PCIe device capabilities register
+ */
+
+#define PCR_AB_DEV_CAP_REG 0x00000064
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CAP_REG 0x00000074
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_LBN 28
+#define PCRF_CZ_CAP_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_LBN 26
+#define PCRF_AZ_CAP_SLOT_PWR_SCL_WIDTH 2
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_LBN 18
+#define PCRF_AZ_CAP_SLOT_PWR_VAL_WIDTH 8
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_LBN 15
+#define PCRF_CZ_ROLE_BASE_ERR_REPORTING_WIDTH 1
+#define PCRF_AB_PWR_IND_LBN 14
+#define PCRF_AB_PWR_IND_WIDTH 1
+#define PCRF_AB_ATTN_IND_LBN 13
+#define PCRF_AB_ATTN_IND_WIDTH 1
+#define PCRF_AB_ATTN_BUTTON_LBN 12
+#define PCRF_AB_ATTN_BUTTON_WIDTH 1
+#define PCRF_AZ_ENDPT_L1_LAT_LBN 9
+#define PCRF_AZ_ENDPT_L1_LAT_WIDTH 3
+#define PCRF_AZ_ENDPT_L0_LAT_LBN 6
+#define PCRF_AZ_ENDPT_L0_LAT_WIDTH 3
+#define PCRF_AZ_TAG_FIELD_LBN 5
+#define PCRF_AZ_TAG_FIELD_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_LBN 3
+#define PCRF_AZ_PHAN_FUNC_WIDTH 2
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_LBN 0
+#define PCRF_AZ_MAX_PAYL_SIZE_SUPT_WIDTH 3
+
+
+/*
+ * PC_DEV_CTL_REG(16bit):
+ * PCIe device control register
+ */
+
+#define PCR_AB_DEV_CTL_REG 0x00000068
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_CTL_REG 0x00000078
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_FN_LEVEL_RESET_LBN 15
+#define PCRF_CZ_FN_LEVEL_RESET_WIDTH 1
+#define PCRF_AZ_MAX_RD_REQ_SIZE_LBN 12
+#define PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_4096 5
+#define PCFE_AZ_MAX_RD_REQ_SIZE_2048 4
+#define PCFE_AZ_MAX_RD_REQ_SIZE_1024 3
+#define PCFE_AZ_MAX_RD_REQ_SIZE_512 2
+#define PCFE_AZ_MAX_RD_REQ_SIZE_256 1
+#define PCFE_AZ_MAX_RD_REQ_SIZE_128 0
+#define PCRF_AZ_EN_NO_SNOOP_LBN 11
+#define PCRF_AZ_EN_NO_SNOOP_WIDTH 1
+#define PCRF_AZ_AUX_PWR_PM_EN_LBN 10
+#define PCRF_AZ_AUX_PWR_PM_EN_WIDTH 1
+#define PCRF_AZ_PHAN_FUNC_EN_LBN 9
+#define PCRF_AZ_PHAN_FUNC_EN_WIDTH 1
+#define PCRF_AB_DEV_CAP_REG_RSVD0_LBN 8
+#define PCRF_AB_DEV_CAP_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_EXTENDED_TAG_EN_LBN 8
+#define PCRF_CZ_EXTENDED_TAG_EN_WIDTH 1
+#define PCRF_AZ_MAX_PAYL_SIZE_LBN 5
+#define PCRF_AZ_MAX_PAYL_SIZE_WIDTH 3
+#define PCFE_AZ_MAX_PAYL_SIZE_4096 5
+#define PCFE_AZ_MAX_PAYL_SIZE_2048 4
+#define PCFE_AZ_MAX_PAYL_SIZE_1024 3
+#define PCFE_AZ_MAX_PAYL_SIZE_512 2
+#define PCFE_AZ_MAX_PAYL_SIZE_256 1
+#define PCFE_AZ_MAX_PAYL_SIZE_128 0
+#define PCRF_AZ_EN_RELAX_ORDER_LBN 4
+#define PCRF_AZ_EN_RELAX_ORDER_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_LBN 3
+#define PCRF_AZ_UNSUP_REQ_RPT_EN_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_RPT_EN_LBN 2
+#define PCRF_AZ_FATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_RPT_EN_WIDTH 1
+#define PCRF_AZ_CORR_ERR_RPT_EN_LBN 0
+#define PCRF_AZ_CORR_ERR_RPT_EN_WIDTH 1
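+
+/*
+ * Illustrative sketch of using the LBN (lowest bit number) and WIDTH values
+ * above to read and update fields of a device control register value.  The
+ * helper names are assumptions for this example; reading and writing the
+ * actual PCI config word is left to the platform's config-space accessors.
+ */
+#if 0	/* illustrative only */
+#include <stdint.h>
+
+static unsigned int devctl_get_max_payload(uint16_t devctl)
+{
+	/* Extract MAX_PAYL_SIZE: one of the PCFE_AZ_MAX_PAYL_SIZE_* encodings. */
+	return (devctl >> PCRF_AZ_MAX_PAYL_SIZE_LBN) &
+	       ((1u << PCRF_AZ_MAX_PAYL_SIZE_WIDTH) - 1);
+}
+
+static uint16_t devctl_set_max_read_req(uint16_t devctl, unsigned int enc)
+{
+	const uint16_t mask = ((1u << PCRF_AZ_MAX_RD_REQ_SIZE_WIDTH) - 1) <<
+			      PCRF_AZ_MAX_RD_REQ_SIZE_LBN;
+
+	/* Replace MAX_RD_REQ_SIZE with e.g. PCFE_AZ_MAX_RD_REQ_SIZE_512. */
+	return (devctl & ~mask) |
+	       ((enc << PCRF_AZ_MAX_RD_REQ_SIZE_LBN) & mask);
+}
+#endif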
+
+
+/*
+ * PC_DEV_STAT_REG(16bit):
+ * PCIe device status register
+ */
+
+#define PCR_AB_DEV_STAT_REG 0x0000006a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_DEV_STAT_REG 0x0000007a
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_TRNS_PEND_LBN 5
+#define PCRF_AZ_TRNS_PEND_WIDTH 1
+#define PCRF_AZ_AUX_PWR_DET_LBN 4
+#define PCRF_AZ_AUX_PWR_DET_WIDTH 1
+#define PCRF_AZ_UNSUP_REQ_DET_LBN 3
+#define PCRF_AZ_UNSUP_REQ_DET_WIDTH 1
+#define PCRF_AZ_FATAL_ERR_DET_LBN 2
+#define PCRF_AZ_FATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_NONFATAL_ERR_DET_LBN 1
+#define PCRF_AZ_NONFATAL_ERR_DET_WIDTH 1
+#define PCRF_AZ_CORR_ERR_DET_LBN 0
+#define PCRF_AZ_CORR_ERR_DET_WIDTH 1
+
+
+/*
+ * PC_LNK_CAP_REG(32bit):
+ * PCIe link capabilities register
+ */
+
+#define PCR_AB_LNK_CAP_REG 0x0000006c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CAP_REG 0x0000007c
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_PORT_NUM_LBN 24
+#define PCRF_AZ_PORT_NUM_WIDTH 8
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_LBN 22
+#define PCRF_DZ_ASPM_OPTIONALITY_CAP_WIDTH 1
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_LBN 21
+#define PCRF_CZ_LINK_BWDITH_NOTIF_CAP_WIDTH 1
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_LBN 20
+#define PCRF_CZ_DATA_LINK_ACTIVE_RPT_CAP_WIDTH 1
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_LBN 19
+#define PCRF_CZ_SURPISE_DOWN_RPT_CAP_WIDTH 1
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_LBN 18
+#define PCRF_CZ_CLOCK_PWR_MNGMNT_CAP_WIDTH 1
+#define PCRF_AZ_DEF_L1_EXIT_LAT_LBN 15
+#define PCRF_AZ_DEF_L1_EXIT_LAT_WIDTH 3
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_LBN 12
+#define PCRF_AZ_DEF_L0_EXIT_LATPORT_NUM_WIDTH 3
+#define PCRF_AZ_AS_LNK_PM_SUPT_LBN 10
+#define PCRF_AZ_AS_LNK_PM_SUPT_WIDTH 2
+#define PCRF_AZ_MAX_LNK_WIDTH_LBN 4
+#define PCRF_AZ_MAX_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_MAX_LNK_SP_LBN 0
+#define PCRF_AZ_MAX_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_LNK_CTL_REG(16bit):
+ * PCIe link control register
+ */
+
+#define PCR_AB_LNK_CTL_REG 0x00000070
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_CTL_REG 0x00000080
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_EXT_SYNC_LBN 7
+#define PCRF_AZ_EXT_SYNC_WIDTH 1
+#define PCRF_AZ_COMM_CLK_CFG_LBN 6
+#define PCRF_AZ_COMM_CLK_CFG_WIDTH 1
+#define PCRF_AB_LNK_CTL_REG_RSVD0_LBN 5
+#define PCRF_AB_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_CZ_LNK_RETRAIN_LBN 5
+#define PCRF_CZ_LNK_RETRAIN_WIDTH 1
+#define PCRF_AZ_LNK_DIS_LBN 4
+#define PCRF_AZ_LNK_DIS_WIDTH 1
+#define PCRF_AZ_RD_COM_BDRY_LBN 3
+#define PCRF_AZ_RD_COM_BDRY_WIDTH 1
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_LBN 0
+#define PCRF_AZ_ACT_ST_LNK_PM_CTL_WIDTH 2
+
+
+/*
+ * PC_LNK_STAT_REG(16bit):
+ * PCIe link status register
+ */
+
+#define PCR_AB_LNK_STAT_REG 0x00000072
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_LNK_STAT_REG 0x00000082
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_SLOT_CLK_CFG_LBN 12
+#define PCRF_AZ_SLOT_CLK_CFG_WIDTH 1
+#define PCRF_AZ_LNK_TRAIN_LBN 11
+#define PCRF_AZ_LNK_TRAIN_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_LBN 10
+#define PCRF_AB_TRAIN_ERR_WIDTH 1
+#define PCRF_AZ_LNK_WIDTH_LBN 4
+#define PCRF_AZ_LNK_WIDTH_WIDTH 6
+#define PCRF_AZ_LNK_SP_LBN 0
+#define PCRF_AZ_LNK_SP_WIDTH 4
+
+
+/*
+ * PC_SLOT_CAP_REG(32bit):
+ * PCIe slot capabilities register
+ */
+
+#define PCR_AB_SLOT_CAP_REG 0x00000074
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_NUM_LBN 19
+#define PCRF_AB_SLOT_NUM_WIDTH 13
+#define PCRF_AB_SLOT_PWR_LIM_SCL_LBN 15
+#define PCRF_AB_SLOT_PWR_LIM_SCL_WIDTH 2
+#define PCRF_AB_SLOT_PWR_LIM_VAL_LBN 7
+#define PCRF_AB_SLOT_PWR_LIM_VAL_WIDTH 8
+#define PCRF_AB_SLOT_HP_CAP_LBN 6
+#define PCRF_AB_SLOT_HP_CAP_WIDTH 1
+#define PCRF_AB_SLOT_HP_SURP_LBN 5
+#define PCRF_AB_SLOT_HP_SURP_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_PRST_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_PRST_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_PRST_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_PRST_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_PRST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_LBN 1
+#define PCRF_AB_SLOT_PWR_CTL_PRST_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_PRST_WIDTH 1
+
+
+/*
+ * PC_SLOT_CTL_REG(16bit):
+ * PCIe slot control register
+ */
+
+#define PCR_AB_SLOT_CTL_REG 0x00000078
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_LBN 10
+#define PCRF_AB_SLOT_PWR_CTLR_CTL_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_CTL_LBN 8
+#define PCRF_AB_SLOT_PWR_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_ATT_IND_CTL_LBN 6
+#define PCRF_AB_SLOT_ATT_IND_CTL_WIDTH 2
+#define PCRF_AB_SLOT_HP_INT_EN_LBN 5
+#define PCRF_AB_SLOT_HP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_LBN 4
+#define PCRF_AB_SLOT_CMD_COMP_INT_EN_WIDTH 1
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_LBN 3
+#define PCRF_AB_SLOT_PRES_DET_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_CHG_EN_WIDTH 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_LBN 1
+#define PCRF_AB_SLOT_PWR_FLTDET_EN_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_BUT_EN_LBN 0
+#define PCRF_AB_SLOT_ATTN_BUT_EN_WIDTH 1
+
+
+/*
+ * PC_SLOT_STAT_REG(16bit):
+ * PCIe slot status register
+ */
+
+#define PCR_AB_SLOT_STAT_REG 0x0000007a
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_PRES_DET_ST_LBN 6
+#define PCRF_AB_PRES_DET_ST_WIDTH 1
+#define PCRF_AB_MRL_SENS_ST_LBN 5
+#define PCRF_AB_MRL_SENS_ST_WIDTH 1
+#define PCRF_AB_SLOT_PWR_IND_LBN 4
+#define PCRF_AB_SLOT_PWR_IND_WIDTH 1
+#define PCRF_AB_SLOT_ATTN_IND_LBN 3
+#define PCRF_AB_SLOT_ATTN_IND_WIDTH 1
+#define PCRF_AB_SLOT_MRL_SENS_LBN 2
+#define PCRF_AB_SLOT_MRL_SENS_WIDTH 1
+#define PCRF_AB_PWR_FLTDET_LBN 1
+#define PCRF_AB_PWR_FLTDET_WIDTH 1
+#define PCRF_AB_ATTN_BUTDET_LBN 0
+#define PCRF_AB_ATTN_BUTDET_WIDTH 1
+
+
+/*
+ * PC_MSIX_CAP_ID_REG(8bit):
+ * MSIX Capability ID
+ */
+
+#define PCR_BB_MSIX_CAP_ID_REG 0x00000090
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CAP_ID_REG 0x000000b0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_CAP_ID_LBN 0
+#define PCRF_BZ_MSIX_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_MSIX_NXT_PTR_REG(8bit):
+ * MSIX Capability Next Capability Ptr
+ */
+
+#define PCR_BB_MSIX_NXT_PTR_REG 0x00000091
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_NXT_PTR_REG 0x000000b1
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_NXT_PTR_LBN 0
+#define PCRF_BZ_MSIX_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_MSIX_CTL_REG(16bit):
+ * MSIX control register
+ */
+
+#define PCR_BB_MSIX_CTL_REG 0x00000092
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_CTL_REG 0x000000b2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_EN_LBN 15
+#define PCRF_BZ_MSIX_EN_WIDTH 1
+#define PCRF_BZ_MSIX_FUNC_MASK_LBN 14
+#define PCRF_BZ_MSIX_FUNC_MASK_WIDTH 1
+#define PCRF_BZ_MSIX_TBL_SIZE_LBN 0
+#define PCRF_BZ_MSIX_TBL_SIZE_WIDTH 11
+
+
+/*
+ * PC_MSIX_TBL_BASE_REG(32bit):
+ * MSIX Capability Vector Table Base
+ */
+
+#define PCR_BB_MSIX_TBL_BASE_REG 0x00000094
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_TBL_BASE_REG 0x000000b4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_TBL_OFF_LBN 3
+#define PCRF_BZ_MSIX_TBL_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_TBL_BIR_LBN 0
+#define PCRF_BZ_MSIX_TBL_BIR_WIDTH 3
+
+
+/*
+ * PC_DEV_CAP2_REG(32bit):
+ * PCIe Device Capabilities 2
+ */
+
+#define PCR_CZ_DEV_CAP2_REG 0x00000094
+/* sienaa0=pci_f0_config,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_SUPPORTED_LBN 18
+#define PCRF_DZ_OBFF_SUPPORTED_WIDTH 2
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_LBN 12
+#define PCRF_DZ_TPH_CMPL_SUPPORTED_WIDTH 2
+#define PCRF_DZ_LTR_M_SUPPORTED_LBN 11
+#define PCRF_DZ_LTR_M_SUPPORTED_WIDTH 1
+#define PCRF_CC_CMPL_TIMEOUT_DIS_LBN 4
+#define PCRF_CC_CMPL_TIMEOUT_DIS_WIDTH 1
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_LBN 4
+#define PCRF_DZ_CMPL_TIMEOUT_DIS_SUPPORTED_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_WIDTH 4
+#define PCFE_CZ_CMPL_TIMEOUT_17000_TO_6400MS 14
+#define PCFE_CZ_CMPL_TIMEOUT_4000_TO_1300MS 13
+#define PCFE_CZ_CMPL_TIMEOUT_1000_TO_3500MS 10
+#define PCFE_CZ_CMPL_TIMEOUT_260_TO_900MS 9
+#define PCFE_CZ_CMPL_TIMEOUT_65_TO_210MS 6
+#define PCFE_CZ_CMPL_TIMEOUT_16_TO_55MS 5
+#define PCFE_CZ_CMPL_TIMEOUT_1_TO_10MS 2
+#define PCFE_CZ_CMPL_TIMEOUT_50_TO_100US 1
+#define PCFE_CZ_CMPL_TIMEOUT_DEFAULT 0
+
+
+/*
+ * PC_DEV_CTL2_REG(16bit):
+ * PCIe Device Control 2
+ */
+
+#define PCR_CZ_DEV_CTL2_REG 0x00000098
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_OBFF_ENABLE_LBN 13
+#define PCRF_DZ_OBFF_ENABLE_WIDTH 2
+#define PCRF_DZ_LTR_ENABLE_LBN 10
+#define PCRF_DZ_LTR_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_LBN 9
+#define PCRF_DZ_IDO_COMPLETION_ENABLE_WIDTH 1
+#define PCRF_DZ_IDO_REQUEST_ENABLE_LBN 8
+#define PCRF_DZ_IDO_REQUEST_ENABLE_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_LBN 4
+#define PCRF_CZ_CMPL_TIMEOUT_DIS_CTL_WIDTH 1
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_LBN 0
+#define PCRF_CZ_CMPL_TIMEOUT_CTL_WIDTH 4
+
+
+/*
+ * PC_MSIX_PBA_BASE_REG(32bit):
+ * MSIX Capability PBA Base
+ */
+
+#define PCR_BB_MSIX_PBA_BASE_REG 0x00000098
+/* falconb0=pci_f0_config */
+
+#define PCR_CZ_MSIX_PBA_BASE_REG 0x000000b8
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_BZ_MSIX_PBA_OFF_LBN 3
+#define PCRF_BZ_MSIX_PBA_OFF_WIDTH 29
+#define PCRF_BZ_MSIX_PBA_BIR_LBN 0
+#define PCRF_BZ_MSIX_PBA_BIR_WIDTH 3
+
+
+/*
+ * PC_LNK_CAP2_REG(32bit):
+ * PCIe Link Capability 2
+ */
+
+#define PCR_DZ_LNK_CAP2_REG 0x0000009c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LNK_SPEED_SUP_LBN 1
+#define PCRF_DZ_LNK_SPEED_SUP_WIDTH 7
+
+
+/*
+ * PC_LNK_CTL2_REG(16bit):
+ * PCIe Link Control 2
+ */
+
+#define PCR_CZ_LNK_CTL2_REG 0x000000a0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_POLLING_DEEMPH_LVL_LBN 12
+#define PCRF_CZ_POLLING_DEEMPH_LVL_WIDTH 1
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_LBN 11
+#define PCRF_CZ_COMPLIANCE_SOS_CTL_WIDTH 1
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_LBN 10
+#define PCRF_CZ_ENTER_MODIFIED_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TRANSMIT_MARGIN_LBN 7
+#define PCRF_CZ_TRANSMIT_MARGIN_WIDTH 3
+#define PCRF_CZ_SELECT_DEEMPH_LBN 6
+#define PCRF_CZ_SELECT_DEEMPH_WIDTH 1
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_LBN 5
+#define PCRF_CZ_HW_AUTONOMOUS_SPEED_DIS_WIDTH 1
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_LBN 4
+#define PCRF_CZ_ENTER_COMPLIANCE_CTL_WIDTH 1
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_LBN 0
+#define PCRF_CZ_TGT_LNK_SPEED_CTL_WIDTH 4
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN3 3
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN2 2
+#define PCFE_DZ_LCTL2_TGT_SPEED_GEN1 1
+
+
+/*
+ * PC_LNK_STAT2_REG(16bit):
+ * PCIe Link Status 2
+ */
+
+#define PCR_CZ_LNK_STAT2_REG 0x000000a2
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_CURRENT_DEEMPH_LBN 0
+#define PCRF_CZ_CURRENT_DEEMPH_WIDTH 1
+
+
+/*
+ * PC_VPD_CAP_ID_REG(8bit):
+ * VPD capability ID register
+ */
+
+#define PCR_AB_VPD_CAP_ID_REG 0x000000b0
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_CAP_ID_LBN 0
+#define PCRF_AB_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_VPD_NXT_PTR_REG(8bit):
+ * VPD next item pointer
+ */
+
+#define PCR_AB_VPD_NXT_PTR_REG 0x000000b1
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_NXT_PTR_LBN 0
+#define PCRF_AB_VPD_NXT_PTR_WIDTH 8
+
+
+/*
+ * PC_VPD_ADDR_REG(16bit):
+ * VPD address register
+ */
+
+#define PCR_AB_VPD_ADDR_REG 0x000000b2
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_VPD_FLAG_LBN 15
+#define PCRF_AB_VPD_FLAG_WIDTH 1
+#define PCRF_AB_VPD_ADDR_LBN 0
+#define PCRF_AB_VPD_ADDR_WIDTH 15
+
+
+/*
+ * PC_VPD_CAP_DATA_REG(32bit):
+ * VPD capability data register
+ */
+
+#define PCR_AB_VPD_CAP_DATA_REG 0x000000b4
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CZ_VPD_CAP_DATA_REG 0x000000d4
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_VPD_DATA_LBN 0
+#define PCRF_AZ_VPD_DATA_WIDTH 32
+
+
+/*
+ * PC_VPD_CAP_CTL_REG(8bit):
+ * VPD control and capabilities register
+ */
+
+#define PCR_CZ_VPD_CAP_CTL_REG 0x000000d0
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_VPD_FLAG_LBN 31
+#define PCRF_CZ_VPD_FLAG_WIDTH 1
+#define PCRF_CZ_VPD_ADDR_LBN 16
+#define PCRF_CZ_VPD_ADDR_WIDTH 15
+#define PCRF_CZ_VPD_NXT_PTR_LBN 8
+#define PCRF_CZ_VPD_NXT_PTR_WIDTH 8
+#define PCRF_CZ_VPD_CAP_ID_LBN 0
+#define PCRF_CZ_VPD_CAP_ID_WIDTH 8
+
+
+/*
+ * PC_AER_CAP_HDR_REG(32bit):
+ * AER capability header register
+ */
+
+#define PCR_AZ_AER_CAP_HDR_REG 0x00000100
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_LBN 20
+#define PCRF_AZ_AERCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_AZ_AERCAPHDR_VER_LBN 16
+#define PCRF_AZ_AERCAPHDR_VER_WIDTH 4
+#define PCRF_AZ_AERCAPHDR_ID_LBN 0
+#define PCRF_AZ_AERCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_AER_UNCORR_ERR_STAT_REG(32bit):
+ * AER Uncorrectable error status register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_STAT_REG 0x00000104
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_STAT_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_STAT_LBN 19
+#define PCRF_AZ_ECRC_ERR_STAT_WIDTH 1
+#define PCRF_AZ_MALF_TLP_STAT_LBN 18
+#define PCRF_AZ_MALF_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_OVF_STAT_LBN 17
+#define PCRF_AZ_RX_OVF_STAT_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_STAT_LBN 16
+#define PCRF_AZ_UNEXP_COMP_STAT_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_STAT_LBN 15
+#define PCRF_AZ_COMP_ABRT_STAT_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_STAT_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_STAT_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_STAT_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AZ_PSON_TLP_STAT_LBN 12
+#define PCRF_AZ_PSON_TLP_STAT_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_STAT_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_STAT_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_STAT_LBN 0
+#define PCRF_AB_TRAIN_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_MASK_REG(32bit):
+ * AER Uncorrectable error mask register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_MASK_REG 0x00000108
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_LBN 24
+#define PCRF_DZ_ATOMIC_OP_EGR_BLOCKED_MASK_WIDTH 1
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_LBN 22
+#define PCRF_DZ_UNCORR_INT_ERR_MASK_WIDTH 1
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_MASK_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_MASK_LBN 19
+#define PCRF_AZ_ECRC_ERR_MASK_WIDTH 1
+#define PCRF_AZ_MALF_TLP_MASK_LBN 18
+#define PCRF_AZ_MALF_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_OVF_MASK_LBN 17
+#define PCRF_AZ_RX_OVF_MASK_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_MASK_LBN 16
+#define PCRF_AZ_UNEXP_COMP_MASK_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_MASK_LBN 15
+#define PCRF_AZ_COMP_ABRT_MASK_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_MASK_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_MASK_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_MASK_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AZ_PSON_TLP_MASK_LBN 12
+#define PCRF_AZ_PSON_TLP_MASK_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_MASK_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_MASK_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_MASK_LBN 0
+#define PCRF_AB_TRAIN_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_UNCORR_ERR_SEV_REG(32bit):
+ * AER Uncorrectable error severity register
+ */
+
+#define PCR_AZ_AER_UNCORR_ERR_SEV_REG 0x0000010c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_LBN 20
+#define PCRF_AZ_UNSUPT_REQ_ERR_SEV_WIDTH 1
+#define PCRF_AZ_ECRC_ERR_SEV_LBN 19
+#define PCRF_AZ_ECRC_ERR_SEV_WIDTH 1
+#define PCRF_AZ_MALF_TLP_SEV_LBN 18
+#define PCRF_AZ_MALF_TLP_SEV_WIDTH 1
+#define PCRF_AZ_RX_OVF_SEV_LBN 17
+#define PCRF_AZ_RX_OVF_SEV_WIDTH 1
+#define PCRF_AZ_UNEXP_COMP_SEV_LBN 16
+#define PCRF_AZ_UNEXP_COMP_SEV_WIDTH 1
+#define PCRF_AZ_COMP_ABRT_SEV_LBN 15
+#define PCRF_AZ_COMP_ABRT_SEV_WIDTH 1
+#define PCRF_AZ_COMP_TIMEOUT_SEV_LBN 14
+#define PCRF_AZ_COMP_TIMEOUT_SEV_WIDTH 1
+#define PCRF_AZ_FC_PROTO_ERR_SEV_LBN 13
+#define PCRF_AZ_FC_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AZ_PSON_TLP_SEV_LBN 12
+#define PCRF_AZ_PSON_TLP_SEV_WIDTH 1
+#define PCRF_AZ_DL_PROTO_ERR_SEV_LBN 4
+#define PCRF_AZ_DL_PROTO_ERR_SEV_WIDTH 1
+#define PCRF_AB_TRAIN_ERR_SEV_LBN 0
+#define PCRF_AB_TRAIN_ERR_SEV_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_STAT_REG(32bit):
+ * AER Correctable error status register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_STAT_REG 0x00000110
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_STAT_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_STAT_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_STAT_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_STAT_LBN 7
+#define PCRF_AZ_BAD_DLLP_STAT_WIDTH 1
+#define PCRF_AZ_BAD_TLP_STAT_LBN 6
+#define PCRF_AZ_BAD_TLP_STAT_WIDTH 1
+#define PCRF_AZ_RX_ERR_STAT_LBN 0
+#define PCRF_AZ_RX_ERR_STAT_WIDTH 1
+
+
+/*
+ * PC_AER_CORR_ERR_MASK_REG(32bit):
+ * AER Correctable error mask register
+ */
+
+#define PCR_AZ_AER_CORR_ERR_MASK_REG 0x00000114
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_LBN 13
+#define PCRF_CZ_ADVSY_NON_FATAL_MASK_WIDTH 1
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_LBN 12
+#define PCRF_AZ_RPLY_TMR_TOUT_MASK_WIDTH 1
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_LBN 8
+#define PCRF_AZ_RPLAY_NUM_RO_MASK_WIDTH 1
+#define PCRF_AZ_BAD_DLLP_MASK_LBN 7
+#define PCRF_AZ_BAD_DLLP_MASK_WIDTH 1
+#define PCRF_AZ_BAD_TLP_MASK_LBN 6
+#define PCRF_AZ_BAD_TLP_MASK_WIDTH 1
+#define PCRF_AZ_RX_ERR_MASK_LBN 0
+#define PCRF_AZ_RX_ERR_MASK_WIDTH 1
+
+
+/*
+ * PC_AER_CAP_CTL_REG(32bit):
+ * AER capability and control register
+ */
+
+#define PCR_AZ_AER_CAP_CTL_REG 0x00000118
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_ECRC_CHK_EN_LBN 8
+#define PCRF_AZ_ECRC_CHK_EN_WIDTH 1
+#define PCRF_AZ_ECRC_CHK_CAP_LBN 7
+#define PCRF_AZ_ECRC_CHK_CAP_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_EN_LBN 6
+#define PCRF_AZ_ECRC_GEN_EN_WIDTH 1
+#define PCRF_AZ_ECRC_GEN_CAP_LBN 5
+#define PCRF_AZ_ECRC_GEN_CAP_WIDTH 1
+#define PCRF_AZ_1ST_ERR_PTR_LBN 0
+#define PCRF_AZ_1ST_ERR_PTR_WIDTH 5
+
+
+/*
+ * PC_AER_HDR_LOG_REG(128bit):
+ * AER Header log register
+ */
+
+#define PCR_AZ_AER_HDR_LOG_REG 0x0000011c
+/* falcona0,falconb0,sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_AZ_HDR_LOG_LBN 0
+#define PCRF_AZ_HDR_LOG_WIDTH 128
+
+
+/*
+ * PC_DEVSN_CAP_HDR_REG(32bit):
+ * Device serial number capability header register
+ */
+
+#define PCR_CZ_DEVSN_CAP_HDR_REG 0x00000140
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_DEVSNCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_DEVSNCAPHDR_VER_LBN 16
+#define PCRF_CZ_DEVSNCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_DEVSNCAPHDR_ID_LBN 0
+#define PCRF_CZ_DEVSNCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_DEVSN_DWORD0_REG(32bit):
+ * Device serial number DWORD0
+ */
+
+#define PCR_CZ_DEVSN_DWORD0_REG 0x00000144
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD0_LBN 0
+#define PCRF_CZ_DEVSN_DWORD0_WIDTH 32
+
+
+/*
+ * PC_DEVSN_DWORD1_REG(32bit):
+ * Device serial number DWORD1
+ */
+
+#define PCR_CZ_DEVSN_DWORD1_REG 0x00000148
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_DEVSN_DWORD1_LBN 0
+#define PCRF_CZ_DEVSN_DWORD1_WIDTH 32
+
+
+/*
+ * PC_ARI_CAP_HDR_REG(32bit):
+ * ARI capability header register
+ */
+
+#define PCR_CZ_ARI_CAP_HDR_REG 0x00000150
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_ARICAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_ARICAPHDR_VER_LBN 16
+#define PCRF_CZ_ARICAPHDR_VER_WIDTH 4
+#define PCRF_CZ_ARICAPHDR_ID_LBN 0
+#define PCRF_CZ_ARICAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_ARI_CAP_REG(16bit):
+ * ARI Capabilities
+ */
+
+#define PCR_CZ_ARI_CAP_REG 0x00000154
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_NXT_FN_NUM_LBN 8
+#define PCRF_CZ_ARI_NXT_FN_NUM_WIDTH 8
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_CAP_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_CAP_WIDTH 1
+
+
+/*
+ * PC_ARI_CTL_REG(16bit):
+ * ARI Control
+ */
+
+#define PCR_CZ_ARI_CTL_REG 0x00000156
+/* sienaa0,hunta0=pci_f0_config */
+
+#define PCRF_CZ_ARI_FN_GRP_LBN 4
+#define PCRF_CZ_ARI_FN_GRP_WIDTH 3
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_LBN 1
+#define PCRF_CZ_ARI_ACS_FNGRP_EN_WIDTH 1
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_LBN 0
+#define PCRF_CZ_ARI_MFVC_FNGRP_EN_WIDTH 1
+
+
+/*
+ * PC_SEC_PCIE_CAP_REG(32bit):
+ * Secondary PCIE Capability Register
+ */
+
+#define PCR_DZ_SEC_PCIE_CAP_REG 0x00000160
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_SEC_NXT_PTR_LBN 20
+#define PCRF_DZ_SEC_NXT_PTR_WIDTH 12
+#define PCRF_DZ_SEC_VERSION_LBN 16
+#define PCRF_DZ_SEC_VERSION_WIDTH 4
+#define PCRF_DZ_SEC_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_SEC_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_HDR_REG(32bit):
+ * SRIOV capability header register
+ */
+
+#define PCR_CC_SRIOV_CAP_HDR_REG 0x00000160
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_HDR_REG 0x00000180
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_LBN 20
+#define PCRF_CZ_SRIOVCAPHDR_NXT_PTR_WIDTH 12
+#define PCRF_CZ_SRIOVCAPHDR_VER_LBN 16
+#define PCRF_CZ_SRIOVCAPHDR_VER_WIDTH 4
+#define PCRF_CZ_SRIOVCAPHDR_ID_LBN 0
+#define PCRF_CZ_SRIOVCAPHDR_ID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_CAP_REG(32bit):
+ * SRIOV Capabilities
+ */
+
+#define PCR_CC_SRIOV_CAP_REG 0x00000164
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CAP_REG 0x00000184
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_LBN 21
+#define PCRF_CZ_VF_MIGR_INT_MSG_NUM_WIDTH 11
+#define PCRF_DZ_VF_ARI_CAP_PRESV_LBN 1
+#define PCRF_DZ_VF_ARI_CAP_PRESV_WIDTH 1
+#define PCRF_CZ_VF_MIGR_CAP_LBN 0
+#define PCRF_CZ_VF_MIGR_CAP_WIDTH 1
+
+
+/*
+ * PC_LINK_CONTROL3_REG(32bit):
+ * Link Control 3.
+ */
+
+#define PCR_DZ_LINK_CONTROL3_REG 0x00000164
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LINK_EQ_INT_EN_LBN 1
+#define PCRF_DZ_LINK_EQ_INT_EN_WIDTH 1
+#define PCRF_DZ_PERFORM_EQL_LBN 0
+#define PCRF_DZ_PERFORM_EQL_WIDTH 1
+
+
+/*
+ * PC_LANE_ERROR_STAT_REG(32bit):
+ * Lane Error Status Register.
+ */
+
+#define PCR_DZ_LANE_ERROR_STAT_REG 0x00000168
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE_STATUS_LBN 0
+#define PCRF_DZ_LANE_STATUS_WIDTH 8
+
+
+/*
+ * PC_SRIOV_CTL_REG(16bit):
+ * SRIOV Control
+ */
+
+#define PCR_CC_SRIOV_CTL_REG 0x00000168
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_CTL_REG 0x00000188
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_LBN 4
+#define PCRF_CZ_VF_ARI_CAP_HRCHY_WIDTH 1
+#define PCRF_CZ_VF_MSE_LBN 3
+#define PCRF_CZ_VF_MSE_WIDTH 1
+#define PCRF_CZ_VF_MIGR_INT_EN_LBN 2
+#define PCRF_CZ_VF_MIGR_INT_EN_WIDTH 1
+#define PCRF_CZ_VF_MIGR_EN_LBN 1
+#define PCRF_CZ_VF_MIGR_EN_WIDTH 1
+#define PCRF_CZ_VF_EN_LBN 0
+#define PCRF_CZ_VF_EN_WIDTH 1
+
+
+/*
+ * PC_SRIOV_STAT_REG(16bit):
+ * SRIOV Status
+ */
+
+#define PCR_CC_SRIOV_STAT_REG 0x0000016a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_STAT_REG 0x0000018a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_STAT_LBN 0
+#define PCRF_CZ_VF_MIGR_STAT_WIDTH 1
+
+
+/*
+ * PC_LANE01_EQU_CONTROL_REG(32bit):
+ * Lanes 0,1 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE01_EQU_CONTROL_REG 0x0000016c
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE1_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE1_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE0_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE0_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_INITIALVFS_REG(16bit):
+ * SRIOV Initial VFs
+ */
+
+#define PCR_CC_SRIOV_INITIALVFS_REG 0x0000016c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_INITIALVFS_REG 0x0000018c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_INITIALVFS_LBN 0
+#define PCRF_CZ_VF_INITIALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_TOTALVFS_REG(10bit):
+ * SRIOV Total VFs
+ */
+
+#define PCR_CC_SRIOV_TOTALVFS_REG 0x0000016e
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_TOTALVFS_REG 0x0000018e
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_TOTALVFS_LBN 0
+#define PCRF_CZ_VF_TOTALVFS_WIDTH 16
+
+
+/*
+ * PC_SRIOV_NUMVFS_REG(16bit):
+ * SRIOV Number of VFs
+ */
+
+#define PCR_CC_SRIOV_NUMVFS_REG 0x00000170
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_NUMVFS_REG 0x00000190
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_NUMVFS_LBN 0
+#define PCRF_CZ_VF_NUMVFS_WIDTH 16
+
+
+/*
+ * PC_LANE23_EQU_CONTROL_REG(32bit):
+ * Lanes 2,3 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE23_EQU_CONTROL_REG 0x00000170
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE3_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE3_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE2_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE2_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_FN_DPND_LNK_REG(16bit):
+ * SRIOV Function dependency link
+ */
+
+#define PCR_CC_SRIOV_FN_DPND_LNK_REG 0x00000172
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_FN_DPND_LNK_REG 0x00000192
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_LBN 0
+#define PCRF_CZ_SRIOV_FN_DPND_LNK_WIDTH 8
+
+
+/*
+ * PC_SRIOV_1STVF_OFFSET_REG(16bit):
+ * SRIOV First VF Offset
+ */
+
+#define PCR_CC_SRIOV_1STVF_OFFSET_REG 0x00000174
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_1STVF_OFFSET_REG 0x00000194
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_1STVF_OFFSET_LBN 0
+#define PCRF_CZ_VF_1STVF_OFFSET_WIDTH 16
+
+
+/*
+ * PC_LANE45_EQU_CONTROL_REG(32bit):
+ * Lanes 4,5 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE45_EQU_CONTROL_REG 0x00000174
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE5_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE5_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE4_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE4_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_VFSTRIDE_REG(16bit):
+ * SRIOV VF Stride
+ */
+
+#define PCR_CC_SRIOV_VFSTRIDE_REG 0x00000176
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_VFSTRIDE_REG 0x00000196
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_VFSTRIDE_LBN 0
+#define PCRF_CZ_VF_VFSTRIDE_WIDTH 16
+
+
+/*
+ * PC_LANE67_EQU_CONTROL_REG(32bit):
+ * Lanes 6,7 Equalization Control Register.
+ */
+
+#define PCR_DZ_LANE67_EQU_CONTROL_REG 0x00000178
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LANE7_EQ_CTRL_LBN 16
+#define PCRF_DZ_LANE7_EQ_CTRL_WIDTH 16
+#define PCRF_DZ_LANE6_EQ_CTRL_LBN 0
+#define PCRF_DZ_LANE6_EQ_CTRL_WIDTH 16
+
+
+/*
+ * PC_SRIOV_DEVID_REG(16bit):
+ * SRIOV VF Device ID
+ */
+
+#define PCR_CC_SRIOV_DEVID_REG 0x0000017a
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_DEVID_REG 0x0000019a
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_DEVID_LBN 0
+#define PCRF_CZ_VF_DEVID_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SUP_PAGESZ_REG(16bit):
+ * SRIOV Supported Page Sizes
+ */
+
+#define PCR_CC_SRIOV_SUP_PAGESZ_REG 0x0000017c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SUP_PAGESZ_REG 0x0000019c
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SUP_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SUP_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_SYS_PAGESZ_REG(32bit):
+ * SRIOV System Page Size
+ */
+
+#define PCR_CC_SRIOV_SYS_PAGESZ_REG 0x00000180
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_SYS_PAGESZ_REG 0x000001a0
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_SYS_PAGESZ_LBN 0
+#define PCRF_CZ_VF_SYS_PAGESZ_WIDTH 16
+
+
+/*
+ * PC_SRIOV_BAR0_REG(32bit):
+ * SRIOV VF Bar0
+ */
+
+#define PCR_CC_SRIOV_BAR0_REG 0x00000184
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR0_REG 0x000001a4
+/* hunta0=pci_f0_config */
+
+#define PCRF_CC_VF_BAR_ADDRESS_LBN 0
+#define PCRF_CC_VF_BAR_ADDRESS_WIDTH 32
+#define PCRF_DZ_VF_BAR0_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR0_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR0_PREF_LBN 3
+#define PCRF_DZ_VF_BAR0_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR0_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR0_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR0_IOM_LBN 0
+#define PCRF_DZ_VF_BAR0_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR1_REG(32bit):
+ * SRIOV Bar1
+ */
+
+#define PCR_CC_SRIOV_BAR1_REG 0x00000188
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR1_REG 0x000001a8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR1_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR1_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR2_REG(32bit):
+ * SRIOV Bar2
+ */
+
+#define PCR_CC_SRIOV_BAR2_REG 0x0000018c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR2_REG 0x000001ac
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR2_ADDRESS_LBN 4
+#define PCRF_DZ_VF_BAR2_ADDRESS_WIDTH 28
+#define PCRF_DZ_VF_BAR2_PREF_LBN 3
+#define PCRF_DZ_VF_BAR2_PREF_WIDTH 1
+#define PCRF_DZ_VF_BAR2_TYPE_LBN 1
+#define PCRF_DZ_VF_BAR2_TYPE_WIDTH 2
+#define PCRF_DZ_VF_BAR2_IOM_LBN 0
+#define PCRF_DZ_VF_BAR2_IOM_WIDTH 1
+
+
+/*
+ * PC_SRIOV_BAR3_REG(32bit):
+ * SRIOV Bar3
+ */
+
+#define PCR_CC_SRIOV_BAR3_REG 0x00000190
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR3_REG 0x000001b0
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR3_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR3_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR4_REG(32bit):
+ * SRIOV Bar4
+ */
+
+#define PCR_CC_SRIOV_BAR4_REG 0x00000194
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR4_REG 0x000001b4
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR4_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR4_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_BAR5_REG(32bit):
+ * SRIOV Bar5
+ */
+
+#define PCR_CC_SRIOV_BAR5_REG 0x00000198
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_BAR5_REG 0x000001b8
+/* hunta0=pci_f0_config */
+
+/* defined as PCRF_CC_VF_BAR_ADDRESS_LBN 0; */
+/* defined as PCRF_CC_VF_BAR_ADDRESS_WIDTH 32 */
+#define PCRF_DZ_VF_BAR5_ADDRESS_LBN 0
+#define PCRF_DZ_VF_BAR5_ADDRESS_WIDTH 32
+
+
+/*
+ * PC_SRIOV_RSVD_REG(16bit):
+ * Reserved register
+ */
+
+#define PCR_DZ_SRIOV_RSVD_REG 0x00000198
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_VF_RSVD_LBN 0
+#define PCRF_DZ_VF_RSVD_WIDTH 16
+
+
+/*
+ * PC_SRIOV_MIBR_SARRAY_OFFSET_REG(32bit):
+ * SRIOV VF Migration State Array Offset
+ */
+
+#define PCR_CC_SRIOV_MIBR_SARRAY_OFFSET_REG 0x0000019c
+/* sienaa0=pci_f0_config */
+
+#define PCR_DZ_SRIOV_MIBR_SARRAY_OFFSET_REG 0x000001bc
+/* hunta0=pci_f0_config */
+
+#define PCRF_CZ_VF_MIGR_OFFSET_LBN 3
+#define PCRF_CZ_VF_MIGR_OFFSET_WIDTH 29
+#define PCRF_CZ_VF_MIGR_BIR_LBN 0
+#define PCRF_CZ_VF_MIGR_BIR_WIDTH 3
+
+
+/*
+ * PC_TPH_CAP_HDR_REG(32bit):
+ * TPH Capability Header Register
+ */
+
+#define PCR_DZ_TPH_CAP_HDR_REG 0x000001c0
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_NXT_PTR_LBN 20
+#define PCRF_DZ_TPH_NXT_PTR_WIDTH 12
+#define PCRF_DZ_TPH_VERSION_LBN 16
+#define PCRF_DZ_TPH_VERSION_WIDTH 4
+#define PCRF_DZ_TPH_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_TPH_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_TPH_REQ_CAP_REG(32bit):
+ * TPH Requester Capability Register
+ */
+
+#define PCR_DZ_TPH_REQ_CAP_REG 0x000001c4
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_ST_TBLE_SIZE_LBN 16
+#define PCRF_DZ_ST_TBLE_SIZE_WIDTH 11
+#define PCRF_DZ_ST_TBLE_LOC_LBN 9
+#define PCRF_DZ_ST_TBLE_LOC_WIDTH 2
+#define PCRF_DZ_EXT_TPH_MODE_SUP_LBN 8
+#define PCRF_DZ_EXT_TPH_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_DEV_MODE_SUP_LBN 2
+#define PCRF_DZ_TPH_DEV_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_LBN 1
+#define PCRF_DZ_TPH_INT_MODE_SUP_WIDTH 1
+#define PCRF_DZ_TPH_NOST_MODE_SUP_LBN 0
+#define PCRF_DZ_TPH_NOST_MODE_SUP_WIDTH 1
+
+
+/*
+ * PC_TPH_REQ_CTL_REG(32bit):
+ * TPH Requester Control Register
+ */
+
+#define PCR_DZ_TPH_REQ_CTL_REG 0x000001c8
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_TPH_REQ_ENABLE_LBN 8
+#define PCRF_DZ_TPH_REQ_ENABLE_WIDTH 2
+#define PCRF_DZ_TPH_ST_MODE_LBN 0
+#define PCRF_DZ_TPH_ST_MODE_WIDTH 3
+
+
+/*
+ * PC_LTR_CAP_HDR_REG(32bit):
+ * Latency Tolerance Reporting Cap Header Reg
+ */
+
+#define PCR_DZ_LTR_CAP_HDR_REG 0x00000290
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_NXT_PTR_LBN 20
+#define PCRF_DZ_LTR_NXT_PTR_WIDTH 12
+#define PCRF_DZ_LTR_VERSION_LBN 16
+#define PCRF_DZ_LTR_VERSION_WIDTH 4
+#define PCRF_DZ_LTR_EXT_CAP_ID_LBN 0
+#define PCRF_DZ_LTR_EXT_CAP_ID_WIDTH 16
+
+
+/*
+ * PC_LTR_MAX_SNOOP_REG(32bit):
+ * LTR Maximum Snoop/No Snoop Register
+ */
+
+#define PCR_DZ_LTR_MAX_SNOOP_REG 0x00000294
+/* hunta0=pci_f0_config */
+
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_LBN 26
+#define PCRF_DZ_LTR_MAX_NOSNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_LBN 16
+#define PCRF_DZ_LTR_MAX_NOSNOOP_LAT_WIDTH 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_LBN 10
+#define PCRF_DZ_LTR_MAX_SNOOP_SCALE_WIDTH 3
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN 0
+#define PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH 10
+
+
+/*
+ * PC_ACK_LAT_TMR_REG(32bit):
+ * ACK latency timer & replay timer register
+ */
+
+#define PCR_AC_ACK_LAT_TMR_REG 0x00000700
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RT_LBN 16
+#define PCRF_AC_RT_WIDTH 16
+#define PCRF_AC_ALT_LBN 0
+#define PCRF_AC_ALT_WIDTH 16
+
+
+/*
+ * PC_OTHER_MSG_REG(32bit):
+ * Other message register
+ */
+
+#define PCR_AC_OTHER_MSG_REG 0x00000704
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_OM_CRPT3_LBN 24
+#define PCRF_AC_OM_CRPT3_WIDTH 8
+#define PCRF_AC_OM_CRPT2_LBN 16
+#define PCRF_AC_OM_CRPT2_WIDTH 8
+#define PCRF_AC_OM_CRPT1_LBN 8
+#define PCRF_AC_OM_CRPT1_WIDTH 8
+#define PCRF_AC_OM_CRPT0_LBN 0
+#define PCRF_AC_OM_CRPT0_WIDTH 8
+
+
+/*
+ * PC_FORCE_LNK_REG(24bit):
+ * Port force link register
+ */
+
+#define PCR_AC_FORCE_LNK_REG 0x00000708
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_LFS_LBN 16
+#define PCRF_AC_LFS_WIDTH 6
+#define PCRF_AC_FL_LBN 15
+#define PCRF_AC_FL_WIDTH 1
+#define PCRF_AC_LN_LBN 0
+#define PCRF_AC_LN_WIDTH 8
+
+
+/*
+ * PC_ACK_FREQ_REG(32bit):
+ * ACK frequency register
+ */
+
+#define PCR_AC_ACK_FREQ_REG 0x0000070c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_LBN 30
+#define PCRF_CC_ALLOW_L1_WITHOUT_L0S_WIDTH 1
+#define PCRF_AC_L1_ENTR_LAT_LBN 27
+#define PCRF_AC_L1_ENTR_LAT_WIDTH 3
+#define PCRF_AC_L0_ENTR_LAT_LBN 24
+#define PCRF_AC_L0_ENTR_LAT_WIDTH 3
+#define PCRF_CC_COMM_NFTS_LBN 16
+#define PCRF_CC_COMM_NFTS_WIDTH 8
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_LBN 16
+#define PCRF_AB_ACK_FREQ_REG_RSVD0_WIDTH 3
+#define PCRF_AC_MAX_FTS_LBN 8
+#define PCRF_AC_MAX_FTS_WIDTH 8
+#define PCRF_AC_ACK_FREQ_LBN 0
+#define PCRF_AC_ACK_FREQ_WIDTH 8
+
+
+/*
+ * PC_PORT_LNK_CTL_REG(32bit):
+ * Port link control register
+ */
+
+#define PCR_AC_PORT_LNK_CTL_REG 0x00000710
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AB_LRE_LBN 27
+#define PCRF_AB_LRE_WIDTH 1
+#define PCRF_AB_ESYNC_LBN 26
+#define PCRF_AB_ESYNC_WIDTH 1
+#define PCRF_AB_CRPT_LBN 25
+#define PCRF_AB_CRPT_WIDTH 1
+#define PCRF_AB_XB_LBN 24
+#define PCRF_AB_XB_WIDTH 1
+#define PCRF_AC_LC_LBN 16
+#define PCRF_AC_LC_WIDTH 6
+#define PCRF_AC_LDR_LBN 8
+#define PCRF_AC_LDR_WIDTH 4
+#define PCRF_AC_FLM_LBN 7
+#define PCRF_AC_FLM_WIDTH 1
+#define PCRF_AC_LKD_LBN 6
+#define PCRF_AC_LKD_WIDTH 1
+#define PCRF_AC_DLE_LBN 5
+#define PCRF_AC_DLE_WIDTH 1
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_LBN 4
+#define PCRF_AB_PORT_LNK_CTL_REG_RSVD0_WIDTH 1
+#define PCRF_AC_RA_LBN 3
+#define PCRF_AC_RA_WIDTH 1
+#define PCRF_AC_LE_LBN 2
+#define PCRF_AC_LE_WIDTH 1
+#define PCRF_AC_SD_LBN 1
+#define PCRF_AC_SD_WIDTH 1
+#define PCRF_AC_OMR_LBN 0
+#define PCRF_AC_OMR_WIDTH 1
+
+
+/*
+ * PC_LN_SKEW_REG(32bit):
+ * Lane skew register
+ */
+
+#define PCR_AC_LN_SKEW_REG 0x00000714
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_DIS_LBN 31
+#define PCRF_AC_DIS_WIDTH 1
+#define PCRF_AB_RST_LBN 30
+#define PCRF_AB_RST_WIDTH 1
+#define PCRF_AC_AD_LBN 25
+#define PCRF_AC_AD_WIDTH 1
+#define PCRF_AC_FCD_LBN 24
+#define PCRF_AC_FCD_WIDTH 1
+#define PCRF_AC_LS2_LBN 16
+#define PCRF_AC_LS2_WIDTH 8
+#define PCRF_AC_LS1_LBN 8
+#define PCRF_AC_LS1_WIDTH 8
+#define PCRF_AC_LS0_LBN 0
+#define PCRF_AC_LS0_WIDTH 8
+
+
+/*
+ * PC_SYM_NUM_REG(16bit):
+ * Symbol number register
+ */
+
+#define PCR_AC_SYM_NUM_REG 0x00000718
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_CC_MAX_FUNCTIONS_LBN 29
+#define PCRF_CC_MAX_FUNCTIONS_WIDTH 3
+#define PCRF_CC_FC_WATCHDOG_TMR_LBN 24
+#define PCRF_CC_FC_WATCHDOG_TMR_WIDTH 5
+#define PCRF_CC_ACK_NAK_TMR_MOD_LBN 19
+#define PCRF_CC_ACK_NAK_TMR_MOD_WIDTH 5
+#define PCRF_CC_REPLAY_TMR_MOD_LBN 14
+#define PCRF_CC_REPLAY_TMR_MOD_WIDTH 5
+#define PCRF_AB_ES_LBN 12
+#define PCRF_AB_ES_WIDTH 3
+#define PCRF_AB_SYM_NUM_REG_RSVD0_LBN 11
+#define PCRF_AB_SYM_NUM_REG_RSVD0_WIDTH 1
+#define PCRF_CC_NUM_SKP_SYMS_LBN 8
+#define PCRF_CC_NUM_SKP_SYMS_WIDTH 3
+#define PCRF_AB_TS2_LBN 4
+#define PCRF_AB_TS2_WIDTH 4
+#define PCRF_AC_TS1_LBN 0
+#define PCRF_AC_TS1_WIDTH 4
+
+
+/*
+ * PC_SYM_TMR_FLT_MSK_REG(16bit):
+ * Symbol timer and Filter Mask Register
+ */
+
+#define PCR_CC_SYM_TMR_FLT_MSK_REG 0x0000071c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK1_LBN 16
+#define PCRF_CC_DEFAULT_FLT_MSK1_WIDTH 16
+#define PCRF_CC_FC_WDOG_TMR_DIS_LBN 15
+#define PCRF_CC_FC_WDOG_TMR_DIS_WIDTH 1
+#define PCRF_CC_SI1_LBN 8
+#define PCRF_CC_SI1_WIDTH 3
+#define PCRF_CC_SKIP_INT_VAL_LBN 0
+#define PCRF_CC_SKIP_INT_VAL_WIDTH 11
+#define PCRF_CC_SI0_LBN 0
+#define PCRF_CC_SI0_WIDTH 8
+
+
+/*
+ * PC_SYM_TMR_REG(16bit):
+ * Symbol timer register
+ */
+
+#define PCR_AB_SYM_TMR_REG 0x0000071c
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCRF_AB_ET_LBN 11
+#define PCRF_AB_ET_WIDTH 4
+#define PCRF_AB_SI1_LBN 8
+#define PCRF_AB_SI1_WIDTH 3
+#define PCRF_AB_SI0_LBN 0
+#define PCRF_AB_SI0_WIDTH 8
+
+
+/*
+ * PC_FLT_MSK_REG(32bit):
+ * Filter Mask Register 2
+ */
+
+#define PCR_CC_FLT_MSK_REG 0x00000720
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_DEFAULT_FLT_MSK2_LBN 0
+#define PCRF_CC_DEFAULT_FLT_MSK2_WIDTH 32
+
+
+/*
+ * PC_PHY_STAT_REG(32bit):
+ * PHY status register
+ */
+
+#define PCR_AB_PHY_STAT_REG 0x00000720
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_STAT_REG 0x00000810
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_SSL_LBN 3
+#define PCRF_AC_SSL_WIDTH 1
+#define PCRF_AC_SSR_LBN 2
+#define PCRF_AC_SSR_WIDTH 1
+#define PCRF_AC_SSCL_LBN 1
+#define PCRF_AC_SSCL_WIDTH 1
+#define PCRF_AC_SSCD_LBN 0
+#define PCRF_AC_SSCD_WIDTH 1
+
+
+/*
+ * PC_PHY_CTL_REG(32bit):
+ * PHY control register
+ */
+
+#define PCR_AB_PHY_CTL_REG 0x00000724
+/* falcona0,falconb0=pci_f0_config */
+
+#define PCR_CC_PHY_CTL_REG 0x00000814
+/* sienaa0=pci_f0_config */
+
+#define PCRF_AC_BD_LBN 31
+#define PCRF_AC_BD_WIDTH 1
+#define PCRF_AC_CDS_LBN 30
+#define PCRF_AC_CDS_WIDTH 1
+#define PCRF_AC_DWRAP_LB_LBN 29
+#define PCRF_AC_DWRAP_LB_WIDTH 1
+#define PCRF_AC_EBD_LBN 28
+#define PCRF_AC_EBD_WIDTH 1
+#define PCRF_AC_SNR_LBN 27
+#define PCRF_AC_SNR_WIDTH 1
+#define PCRF_AC_RX_NOT_DET_LBN 2
+#define PCRF_AC_RX_NOT_DET_WIDTH 1
+#define PCRF_AC_FORCE_LOS_VAL_LBN 1
+#define PCRF_AC_FORCE_LOS_VAL_WIDTH 1
+#define PCRF_AC_FORCE_LOS_EN_LBN 0
+#define PCRF_AC_FORCE_LOS_EN_WIDTH 1
+
+
+/*
+ * PC_DEBUG0_REG(32bit):
+ * Debug register 0
+ */
+
+#define PCR_AC_DEBUG0_REG 0x00000728
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI03_LBN 24
+#define PCRF_AC_CDI03_WIDTH 8
+#define PCRF_AC_CDI0_LBN 0
+#define PCRF_AC_CDI0_WIDTH 32
+#define PCRF_AC_CDI02_LBN 16
+#define PCRF_AC_CDI02_WIDTH 8
+#define PCRF_AC_CDI01_LBN 8
+#define PCRF_AC_CDI01_WIDTH 8
+#define PCRF_AC_CDI00_LBN 0
+#define PCRF_AC_CDI00_WIDTH 8
+
+
+/*
+ * PC_DEBUG1_REG(32bit):
+ * Debug register 1
+ */
+
+#define PCR_AC_DEBUG1_REG 0x0000072c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_CDI13_LBN 24
+#define PCRF_AC_CDI13_WIDTH 8
+#define PCRF_AC_CDI1_LBN 0
+#define PCRF_AC_CDI1_WIDTH 32
+#define PCRF_AC_CDI12_LBN 16
+#define PCRF_AC_CDI12_WIDTH 8
+#define PCRF_AC_CDI11_LBN 8
+#define PCRF_AC_CDI11_WIDTH 8
+#define PCRF_AC_CDI10_LBN 0
+#define PCRF_AC_CDI10_WIDTH 8
+
+
+/*
+ * PC_XPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XPFCC_STAT_REG
+ */
+
+#define PCR_AC_XPFCC_STAT_REG 0x00000730
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XPDC_LBN 12
+#define PCRF_AC_XPDC_WIDTH 8
+#define PCRF_AC_XPHC_LBN 0
+#define PCRF_AC_XPHC_WIDTH 12
+
+
+/*
+ * PC_XNPFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XNPFCC_STAT_REG
+ */
+
+#define PCR_AC_XNPFCC_STAT_REG 0x00000734
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XNPDC_LBN 12
+#define PCRF_AC_XNPDC_WIDTH 8
+#define PCRF_AC_XNPHC_LBN 0
+#define PCRF_AC_XNPHC_WIDTH 12
+
+
+/*
+ * PC_XCFCC_STAT_REG(24bit):
+ * documentation to be written for sum_PC_XCFCC_STAT_REG
+ */
+
+#define PCR_AC_XCFCC_STAT_REG 0x00000738
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_XCDC_LBN 12
+#define PCRF_AC_XCDC_WIDTH 8
+#define PCRF_AC_XCHC_LBN 0
+#define PCRF_AC_XCHC_WIDTH 12
+
+
+/*
+ * PC_Q_STAT_REG(8bit):
+ * documentation to be written for sum_PC_Q_STAT_REG
+ */
+
+#define PCR_AC_Q_STAT_REG 0x0000073c
+/* falcona0,falconb0,sienaa0=pci_f0_config */
+
+#define PCRF_AC_RQNE_LBN 2
+#define PCRF_AC_RQNE_WIDTH 1
+#define PCRF_AC_XRNE_LBN 1
+#define PCRF_AC_XRNE_WIDTH 1
+#define PCRF_AC_RCNR_LBN 0
+#define PCRF_AC_RCNR_WIDTH 1
+
+
+/*
+ * PC_VC_XMIT_ARB1_REG(32bit):
+ * VC Transmit Arbitration Register 1
+ */
+
+#define PCR_CC_VC_XMIT_ARB1_REG 0x00000740
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC_XMIT_ARB2_REG(32bit):
+ * VC Transmit Arbitration Register 2
+ */
+
+#define PCR_CC_VC_XMIT_ARB2_REG 0x00000744
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_P_RQ_CTL_REG(32bit):
+ * VC0 Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_P_RQ_CTL_REG 0x00000748
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_NP_RQ_CTL_REG(32bit):
+ * VC0 Non-Posted Receive Queue Control
+ */
+
+#define PCR_CC_VC0_NP_RQ_CTL_REG 0x0000074c
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_VC0_C_RQ_CTL_REG(32bit):
+ * VC0 Completion Receive Queue Control
+ */
+
+#define PCR_CC_VC0_C_RQ_CTL_REG 0x00000750
+/* sienaa0=pci_f0_config */
+
+
+
+/*
+ * PC_GEN2_REG(32bit):
+ * Gen2 Register
+ */
+
+#define PCR_CC_GEN2_REG 0x0000080c
+/* sienaa0=pci_f0_config */
+
+#define PCRF_CC_SET_DE_EMPHASIS_LBN 20
+#define PCRF_CC_SET_DE_EMPHASIS_WIDTH 1
+#define PCRF_CC_CFG_TX_COMPLIANCE_LBN 19
+#define PCRF_CC_CFG_TX_COMPLIANCE_WIDTH 1
+#define PCRF_CC_CFG_TX_SWING_LBN 18
+#define PCRF_CC_CFG_TX_SWING_WIDTH 1
+#define PCRF_CC_DIR_SPEED_CHANGE_LBN 17
+#define PCRF_CC_DIR_SPEED_CHANGE_WIDTH 1
+#define PCRF_CC_LANE_ENABLE_LBN 8
+#define PCRF_CC_LANE_ENABLE_WIDTH 9
+#define PCRF_CC_NUM_FTS_LBN 0
+#define PCRF_CC_NUM_FTS_WIDTH 8
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_REGS_PCI_H */
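Each register above is described by a PCR_* config-space offset plus PCRF_*_LBN/_WIDTH pairs giving the lowest bit number and width of every field. A minimal sketch of how such a pair can be used to pull a field out of a raw config-space dword follows; the pcr_field_get() helper is purely illustrative, is not part of this header, and assumes the field width is less than 32 bits.

    #include <stdint.h>

    /* Illustrative helper only: extract a field described by an _LBN/_WIDTH
     * pair from a 32-bit register value. Assumes width < 32. */
    static inline uint32_t
    pcr_field_get(uint32_t reg_val, unsigned int lbn, unsigned int width)
    {
            return ((reg_val >> lbn) & ((1u << width) - 1));
    }

    /* Example: maximum snoop latency, where ltr_val holds a dword read
     * from config-space offset PCR_DZ_LTR_MAX_SNOOP_REG. */
    uint32_t max_snoop_lat = pcr_field_get(ltr_val,
        PCRF_DZ_LTR_MAX_SNOOP_LAT_LBN, PCRF_DZ_LTR_MAX_SNOOP_LAT_WIDTH);
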
diff --git a/drivers/net/sfc/base/efx_rx.c b/drivers/net/sfc/base/efx_rx.c
new file mode 100644
index 00000000..c8156341
--- /dev/null
+++ b/drivers/net/sfc/base/efx_rx.c
@@ -0,0 +1,1315 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size);
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n);
+
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n);
+
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer);
+
+#endif /* EFSYS_OPT_RX_SCALE */
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp);
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added);
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp);
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qps_update_credits(
+ __in efx_rxq_t *erp);
+
+static __checkReturn uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp);
+#endif
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp);
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp);
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp);
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_rx_ops_t __efx_rx_siena_ops = {
+ siena_rx_init, /* erxo_init */
+ siena_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ siena_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ siena_rx_scale_mode_set, /* erxo_scale_mode_set */
+ siena_rx_scale_key_set, /* erxo_scale_key_set */
+ siena_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ siena_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ siena_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ siena_rx_qpost, /* erxo_qpost */
+ siena_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ siena_rx_qps_update_credits, /* erxo_qps_update_credits */
+ siena_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ siena_rx_qflush, /* erxo_qflush */
+ siena_rx_qenable, /* erxo_qenable */
+ siena_rx_qcreate, /* erxo_qcreate */
+ siena_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+static const efx_rx_ops_t __efx_rx_ef10_ops = {
+ ef10_rx_init, /* erxo_init */
+ ef10_rx_fini, /* erxo_fini */
+#if EFSYS_OPT_RX_SCATTER
+ ef10_rx_scatter_enable, /* erxo_scatter_enable */
+#endif
+#if EFSYS_OPT_RX_SCALE
+ ef10_rx_scale_mode_set, /* erxo_scale_mode_set */
+ ef10_rx_scale_key_set, /* erxo_scale_key_set */
+ ef10_rx_scale_tbl_set, /* erxo_scale_tbl_set */
+ ef10_rx_prefix_hash, /* erxo_prefix_hash */
+#endif
+ ef10_rx_prefix_pktlen, /* erxo_prefix_pktlen */
+ ef10_rx_qpost, /* erxo_qpost */
+ ef10_rx_qpush, /* erxo_qpush */
+#if EFSYS_OPT_RX_PACKED_STREAM
+ ef10_rx_qps_update_credits, /* erxo_qps_update_credits */
+ ef10_rx_qps_packet_info, /* erxo_qps_packet_info */
+#endif
+ ef10_rx_qflush, /* erxo_qflush */
+ ef10_rx_qenable, /* erxo_qenable */
+ ef10_rx_qcreate, /* erxo_qcreate */
+ ef10_rx_qdestroy, /* erxo_qdestroy */
+};
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+
+ __checkReturn efx_rc_t
+efx_rx_init(
+ __inout efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_RX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ erxop = &__efx_rx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ erxop = &__efx_rx_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ if ((rc = erxop->erxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_erxop = erxop;
+ enp->en_mod_flags |= EFX_MOD_RX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+ return (rc);
+}
+
+ void
+efx_rx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+ EFSYS_ASSERT3U(enp->en_rx_qcount, ==, 0);
+
+ erxop->erxo_fini(enp);
+
+ enp->en_erxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_RX;
+}
+
+#if EFSYS_OPT_RX_SCATTER
+ __checkReturn efx_rc_t
+efx_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scatter_enable(enp, buf_size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_hash_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_hash_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to insert RX hash value */
+ *supportp = enp->en_hash_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_support_get(
+ __in efx_nic_t *enp,
+ __out efx_rx_scale_support_t *supportp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (supportp == NULL) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Report if resources are available to support RSS */
+ *supportp = enp->en_rss_support;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if (erxop->erxo_scale_mode_set != NULL) {
+ if ((rc = erxop->erxo_scale_mode_set(enp, alg,
+ type, insert)) != 0)
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_key_set(enp, key, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn efx_rc_t
+efx_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ if ((rc = erxop->erxo_scale_tbl_set(enp, table, n)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ void
+efx_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpost(erp, addrp, size, n, completed, added);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+
+ void
+efx_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qps_update_credits(erp);
+}
+
+ __checkReturn uint8_t *
+efx_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ return (erxop->erxo_qps_packet_info(erp, buffer,
+ buffer_length, current_offset, lengthp,
+ next_offsetp, timestamp));
+}
+
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+ void
+efx_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qpush(erp, added, pushedp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ if ((rc = erxop->erxo_qflush(erp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qenable(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __deref_out efx_rxq_t **erpp)
+{
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+ efx_rxq_t *erp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_RX);
+
+ /* Allocate an RXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_rxq_t), erp);
+
+ if (erp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ erp->er_magic = EFX_RXQ_MAGIC;
+ erp->er_enp = enp;
+ erp->er_index = index;
+ erp->er_mask = n - 1;
+ erp->er_esmp = esmp;
+
+ if ((rc = erxop->erxo_qcreate(enp, index, label, type, esmp, n, id,
+ eep, erp)) != 0)
+ goto fail2;
+
+ enp->en_rx_qcount++;
+ *erpp = erp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ erxop->erxo_qdestroy(erp);
+}
+
+ __checkReturn efx_rc_t
+efx_pseudo_hdr_pkt_length_get(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ return (erxop->erxo_prefix_pktlen(enp, buffer, lengthp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+ __checkReturn uint32_t
+efx_pseudo_hdr_hash_get(
+ __in efx_rxq_t *erp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ efx_nic_t *enp = erp->er_enp;
+ const efx_rx_ops_t *erxop = enp->en_erxop;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_hash_support, ==, EFX_RX_HASH_AVAILABLE);
+ return (erxop->erxo_prefix_hash(enp, func, buffer));
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_rx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+ unsigned int index;
+
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_DESC_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, 0x3000 / 32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Zero the RSS table */
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS;
+ index++) {
+ EFX_ZERO_OWORD(oword);
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ /* The RSS key and indirection table are writable. */
+ enp->en_rss_support = EFX_RX_SCALE_EXCLUSIVE;
+
+ /* Hardware can insert RX hash with/without RSS */
+ enp->en_hash_support = EFX_RX_HASH_AVAILABLE;
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return (0);
+}
+
+#if EFSYS_OPT_RX_SCATTER
+static __checkReturn efx_rc_t
+siena_rx_scatter_enable(
+ __in efx_nic_t *enp,
+ __in unsigned int buf_size)
+{
+ unsigned int nbuf32;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ nbuf32 = buf_size / 32;
+ if ((nbuf32 == 0) ||
+ (nbuf32 >= (1 << FRF_BZ_RX_USR_BUF_SIZE_WIDTH)) ||
+ ((buf_size % 32) != 0)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_rx_qcount > 0) {
+ rc = EBUSY;
+ goto fail2;
+ }
+
+ /* Set scatter buffer size */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_USR_BUF_SIZE, nbuf32);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Enable scatter for packets not matching a filter */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+
+#define EFX_RX_LFSR_HASH(_enp, _insert) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ if ((_enp)->en_family == EFX_FAMILY_SIENA) { \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 0); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, \
+ &oword); \
+ } \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV4_HASH(_enp, _insert, _ip, _tcp) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_ALG, 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_IP_HASH, \
+ (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_TCP_SUP, \
+ (_tcp) ? 0 : 1); \
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_HASH_INSRT_HDR, \
+ (_insert) ? 1 : 0); \
+ EFX_BAR_WRITEO((_enp), FR_AZ_RX_CFG_REG, &oword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_RX_TOEPLITZ_IPV6_HASH(_enp, _ip, _tcp, _rc) \
+ do { \
+ efx_oword_t oword; \
+ \
+ EFX_BAR_READO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, (_ip) ? 1 : 0); \
+ EFX_SET_OWORD_FIELD(oword, \
+ FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS, (_tcp) ? 0 : 1); \
+ EFX_BAR_WRITEO((_enp), FR_CZ_RX_RSS_IPV6_REG3, &oword); \
+ \
+ (_rc) = 0; \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+
+#if EFSYS_OPT_RX_SCALE
+
+static __checkReturn efx_rc_t
+siena_rx_scale_mode_set(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t alg,
+ __in efx_rx_hash_type_t type,
+ __in boolean_t insert)
+{
+ efx_rc_t rc;
+
+ switch (alg) {
+ case EFX_RX_HASHALG_LFSR:
+ EFX_RX_LFSR_HASH(enp, insert);
+ break;
+
+ case EFX_RX_HASHALG_TOEPLITZ:
+ EFX_RX_TOEPLITZ_IPV4_HASH(enp, insert,
+ type & EFX_RX_HASH_IPV4,
+ type & EFX_RX_HASH_TCPIPV4);
+
+ EFX_RX_TOEPLITZ_IPV6_HASH(enp,
+ type & EFX_RX_HASH_IPV6,
+ type & EFX_RX_HASH_TCPIPV6,
+ rc);
+ if (rc != 0)
+ goto fail1;
+
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ EFX_RX_LFSR_HASH(enp, B_FALSE);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_key_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) uint8_t *key,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ unsigned int byte;
+ unsigned int offset;
+ efx_rc_t rc;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv4 hash key */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv4 hash key */
+ EFX_BAR_READO(enp, FR_BZ_RX_RSS_TKEY_REG, &oword);
+ for (offset = (FRF_BZ_RX_RSS_TKEY_LBN + FRF_BZ_RX_RSS_TKEY_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+
+ if ((enp->en_features & EFX_FEATURE_IPV6) == 0)
+ goto done;
+
+ byte = 0;
+
+ /* Write Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+
+ /* Write Toeplitz IPv6 hash key 2 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+
+ /* Write Toeplitz IPv6 hash key 1 */
+ EFX_ZERO_OWORD(oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset)
+ oword.eo_u8[offset - 1] = key[byte++];
+
+ EFX_BAR_WRITEO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+
+ byte = 0;
+
+ /* Verify Toeplitz IPv6 hash key 3 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG3, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 2 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG2, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ /* Verify Toeplitz IPv6 hash key 1 */
+ EFX_BAR_READO(enp, FR_CZ_RX_RSS_IPV6_REG1, &oword);
+ for (offset = (FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN +
+ FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH) / 8;
+ offset > 0 && byte < n;
+ --offset) {
+ if (oword.eo_u8[offset - 1] != key[byte++]) {
+ rc = EFAULT;
+ goto fail4;
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn efx_rc_t
+siena_rx_scale_tbl_set(
+ __in efx_nic_t *enp,
+ __in_ecount(n) unsigned int *table,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ int index;
+ efx_rc_t rc;
+
+ EFX_STATIC_ASSERT(EFX_RSS_TBL_SIZE == FR_BZ_RX_INDIRECTION_TBL_ROWS);
+ EFX_STATIC_ASSERT(EFX_MAXRSS == (1 << FRF_BZ_IT_QUEUE_WIDTH));
+
+ if (n > FR_BZ_RX_INDIRECTION_TBL_ROWS) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ for (index = 0; index < FR_BZ_RX_INDIRECTION_TBL_ROWS; index++) {
+ uint32_t byte;
+
+ /* Calculate the entry to place in the table */
+ byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ EFSYS_PROBE2(table, int, index, uint32_t, byte);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_BZ_IT_QUEUE, byte);
+
+ /* Write the table */
+ EFX_BAR_TBL_WRITEO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+ }
+
+ for (index = FR_BZ_RX_INDIRECTION_TBL_ROWS - 1; index >= 0; --index) {
+ uint32_t byte;
+
+		/* Recompute the expected table entry for verification */
+		byte = (n > 0) ? (uint32_t)table[index % n] : 0;
+
+ /* Read the table */
+ EFX_BAR_TBL_READO(enp, FR_BZ_RX_INDIRECTION_TBL,
+ index, &oword, B_TRUE);
+
+ /* Verify the entry */
+ if (EFX_OWORD_FIELD(oword, FRF_BZ_IT_QUEUE) != byte) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+#endif
+
+/*
+ * Falcon/Siena pseudo-header
+ * --------------------------
+ *
+ * Receive packets are prefixed by an optional 16-byte pseudo-header.
+ * The pseudo-header is a byte array of one of the forms:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.TT.TT.TT.TT
+ * xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.xx.LL.LL
+ *
+ * where:
+ * TT.TT.TT.TT Toeplitz hash (32-bit big-endian)
+ * LL.LL LFSR hash (16-bit big-endian)
+ */
+
+#if EFSYS_OPT_RX_SCALE
+static __checkReturn uint32_t
+siena_rx_prefix_hash(
+ __in efx_nic_t *enp,
+ __in efx_rx_hash_alg_t func,
+ __in uint8_t *buffer)
+{
+ _NOTE(ARGUNUSED(enp))
+
+ switch (func) {
+ case EFX_RX_HASHALG_TOEPLITZ:
+ return ((buffer[12] << 24) |
+ (buffer[13] << 16) |
+ (buffer[14] << 8) |
+ buffer[15]);
+
+ case EFX_RX_HASHALG_LFSR:
+ return ((buffer[14] << 8) | buffer[15]);
+
+ default:
+ EFSYS_ASSERT(0);
+ return (0);
+ }
+}
+#endif /* EFSYS_OPT_RX_SCALE */
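As a usage note (a sketch, not part of this patch): when hash insertion has been enabled, a client driver can recover the RSS hash from the 16-byte prefix through the common entry point defined earlier in this file. The snippet below assumes erp and pkt_start come from the caller's receive completion handling.

    uint8_t *payload;
    uint32_t hash;

    /* Pseudo-header bytes 12..15 carry the Toeplitz hash (big-endian) */
    hash = efx_pseudo_hdr_hash_get(erp, EFX_RX_HASHALG_TOEPLITZ, pkt_start);

    /* The packet data itself follows the 16-byte prefix */
    payload = pkt_start + 16;
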
+
+static __checkReturn efx_rc_t
+siena_rx_prefix_pktlen(
+ __in efx_nic_t *enp,
+ __in uint8_t *buffer,
+ __out uint16_t *lengthp)
+{
+ _NOTE(ARGUNUSED(enp, buffer, lengthp))
+
+ /* Not supported by Falcon/Siena hardware */
+ EFSYS_ASSERT(0);
+ return (ENOTSUP);
+}
+
+
+static void
+siena_rx_qpost(
+ __in efx_rxq_t *erp,
+ __in_ecount(n) efsys_dma_addr_t *addrp,
+ __in size_t size,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __in unsigned int added)
+{
+ efx_qword_t qword;
+ unsigned int i;
+ unsigned int offset;
+ unsigned int id;
+
+ /* The client driver must not overfill the queue */
+ EFSYS_ASSERT3U(added - completed + n, <=,
+ EFX_RXQ_LIMIT(erp->er_mask + 1));
+
+ id = added & (erp->er_mask);
+ for (i = 0; i < n; i++) {
+ EFSYS_PROBE4(rx_post, unsigned int, erp->er_index,
+ unsigned int, id, efsys_dma_addr_t, addrp[i],
+ size_t, size);
+
+ EFX_POPULATE_QWORD_3(qword,
+ FSF_AZ_RX_KER_BUF_SIZE, (uint32_t)(size),
+ FSF_AZ_RX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addrp[i] & 0xffffffff),
+ FSF_AZ_RX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addrp[i] >> 32));
+
+ offset = id * sizeof (efx_qword_t);
+ EFSYS_MEM_WRITEQ(erp->er_esmp, offset, &qword);
+
+ id = (id + 1) & (erp->er_mask);
+ }
+}
+
+static void
+siena_rx_qpush(
+ __in efx_rxq_t *erp,
+ __in unsigned int added,
+ __inout unsigned int *pushedp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ unsigned int pushed = *pushedp;
+ uint32_t wptr;
+ efx_oword_t oword;
+ efx_dword_t dword;
+
+ /* All descriptors are pushed */
+ *pushedp = added;
+
+ /* Push the populated descriptors out */
+ wptr = added & erp->er_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DESC_WPTR, wptr);
+
+	/* Only write DWORD 3, which holds the descriptor write pointer */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(erp->er_esmp, erp->er_mask + 1,
+ wptr, pushed & erp->er_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_RX_DESC_UPD_REGP0,
+ erp->er_index, &dword, B_FALSE);
+}
+
+#if EFSYS_OPT_RX_PACKED_STREAM
+static void
+siena_rx_qps_update_credits(
+ __in efx_rxq_t *erp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+}
+
+static uint8_t *
+siena_rx_qps_packet_info(
+ __in efx_rxq_t *erp,
+ __in uint8_t *buffer,
+ __in uint32_t buffer_length,
+ __in uint32_t current_offset,
+ __out uint16_t *lengthp,
+ __out uint32_t *next_offsetp,
+ __out uint32_t *timestamp)
+{
+ /* Not supported by Siena hardware */
+ EFSYS_ASSERT(0);
+
+ return (NULL);
+}
+#endif /* EFSYS_OPT_RX_PACKED_STREAM */
+
+static __checkReturn efx_rc_t
+siena_rx_qflush(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ label = erp->er_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_rx_qenable(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT3U(erp->er_magic, ==, EFX_RXQ_MAGIC);
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_RX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_rx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efx_rxq_type_t type,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in efx_evq_t *eep,
+ __in efx_rxq_t *erp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ boolean_t jumbo;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_RX_NLABELS ==
+ (1 << FRF_AZ_RX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_RX_NLABELS);
+ EFSYS_ASSERT3U(enp->en_rx_qcount + 1, <, encp->enc_rxq_limit);
+
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MAXNDESCS));
+ EFX_STATIC_ASSERT(ISP2(EFX_RXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_RXQ_MINNDESCS) || (n > EFX_RXQ_MAXNDESCS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_rxq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ for (size = 0; (1 << size) <= (EFX_RXQ_MAXNDESCS / EFX_RXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_RXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ switch (type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ jumbo = B_FALSE;
+ break;
+
+#if EFSYS_OPT_RX_SCATTER
+ case EFX_RXQ_TYPE_SCATTER:
+ if (enp->en_family < EFX_FAMILY_SIENA) {
+ rc = EINVAL;
+ goto fail4;
+ }
+ jumbo = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCATTER */
+
+ default:
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* Set up the new descriptor queue */
+ EFX_POPULATE_OWORD_7(oword,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_RX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL, label,
+ FRF_AZ_RX_DESCQ_SIZE, size,
+ FRF_AZ_RX_DESCQ_TYPE, 0,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_rx_qdestroy(
+ __in efx_rxq_t *erp)
+{
+ efx_nic_t *enp = erp->er_enp;
+ efx_oword_t oword;
+
+ EFSYS_ASSERT(enp->en_rx_qcount != 0);
+ --enp->en_rx_qcount;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_RX_DESC_PTR_TBL,
+ erp->er_index, &oword, B_TRUE);
+
+ /* Free the RXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_rxq_t), erp);
+}
+
+static void
+siena_rx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
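For orientation (illustrative only, not part of the patch): the public entry points in this file are normally used in the order sketched below. Here ndescs, buf_tbl_id, esmp, dma_addrs, buf_size, nbufs and evq are placeholders for state the caller sets up through the event and memory APIs, and error handling is omitted.

    efx_rxq_t *rxq;
    unsigned int pushed = 0;
    efx_rc_t rc;

    rc = efx_rx_init(enp);                                  /* once per NIC */
    rc = efx_rx_qcreate(enp, 0, 0, EFX_RXQ_TYPE_DEFAULT,
        esmp, ndescs, buf_tbl_id, evq, &rxq);               /* one queue */
    efx_rx_qpost(rxq, dma_addrs, buf_size, nbufs, 0, 0);    /* fill descriptors */
    efx_rx_qpush(rxq, nbufs, &pushed);                      /* ring the doorbell */
    efx_rx_qenable(rxq);                                    /* start receiving */

    /* ... datapath runs; on teardown wait for the flush event ... */
    rc = efx_rx_qflush(rxq);
    efx_rx_qdestroy(rxq);
    efx_rx_fini(enp);
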
diff --git a/drivers/net/sfc/base/efx_sram.c b/drivers/net/sfc/base/efx_sram.c
new file mode 100644
index 00000000..5f4edea7
--- /dev/null
+++ b/drivers/net/sfc/base/efx_sram.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+ __checkReturn efx_rc_t
+efx_sram_buf_tbl_set(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in efsys_mem_t *esmp,
+ __in size_t n)
+{
+ efx_qword_t qword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+ efsys_dma_addr_t addr;
+ efx_oword_t oword;
+ unsigned int count;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return (0);
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ if (stop >= EFX_BUF_TBL_SIZE) {
+ rc = EFBIG;
+ goto fail1;
+ }
+
+ /* Add the entries into the buffer table */
+ addr = EFSYS_MEM_ADDR(esmp);
+ for (id = start; id != stop; id++) {
+ EFX_POPULATE_QWORD_5(qword,
+ FRF_AZ_IP_DAT_BUF_SIZE, 0, FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF_DW0,
+ (uint32_t)((addr >> 12) & 0xffffffff),
+ FRF_AZ_BUF_ADR_FBUF_DW1,
+ (uint32_t)((addr >> 12) >> 32),
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_FULL_TBL,
+ id, &qword);
+
+ addr += EFX_BUF_SIZE;
+ }
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ /* Flush the write buffer */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_BUF_UPD_CMD, 1,
+ FRF_AZ_BUF_CLR_CMD, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+ /* Poll for the last entry being written to the buffer table */
+ EFSYS_ASSERT3U(id, ==, stop);
+ addr -= EFX_BUF_SIZE;
+
+ count = 0;
+ do {
+ EFSYS_PROBE1(wait, unsigned int, count);
+
+ /* Spin for 1 ms */
+ EFSYS_SPIN(1000);
+
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) ==
+ (uint32_t)((addr >> 12) & 0xffffffff) &&
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) ==
+ (uint32_t)((addr >> 12) >> 32))
+ goto verify;
+
+ } while (++count < 100);
+
+ rc = ETIMEDOUT;
+ goto fail2;
+
+verify:
+ /* Verify the rest of the entries in the buffer table */
+ while (--id != start) {
+ addr -= EFX_BUF_SIZE;
+
+ /* Read the buffer table entry */
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_FULL_TBL,
+ id - 1, &qword);
+
+ if (EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW0) !=
+ (uint32_t)((addr >> 12) & 0xffffffff) ||
+ EFX_QWORD_FIELD(qword, FRF_AZ_BUF_ADR_FBUF_DW1) !=
+ (uint32_t)((addr >> 12) >> 32)) {
+ rc = EFAULT;
+ goto fail3;
+ }
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+ id = stop;
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, id - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_sram_buf_tbl_clear(
+ __in efx_nic_t *enp,
+ __in uint32_t id,
+ __in size_t n)
+{
+ efx_oword_t oword;
+ uint32_t start = id;
+ uint32_t stop = start + n;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+ if (enp->en_family == EFX_FAMILY_HUNTINGTON ||
+ enp->en_family == EFX_FAMILY_MEDFORD) {
+ /*
+ * FIXME: the efx_sram_buf_tbl_*() functionality needs to be
+ * pulled inside the Falcon/Siena queue create/destroy code,
+ * and then the original functions can be removed (see bug30834
+ * comment #1). But, for now, we just ensure that they are
+ * no-ops for EF10, to allow bringing up existing drivers
+ * without modification.
+ */
+
+ return;
+ }
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ EFSYS_ASSERT3U(stop, <, EFX_BUF_TBL_SIZE);
+
+ EFSYS_PROBE2(buf, uint32_t, start, uint32_t, stop - 1);
+
+ EFX_POPULATE_OWORD_4(oword, FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1, FRF_AZ_BUF_CLR_END_ID, stop - 1,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_UPD_REG, &oword);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+static void
+efx_sram_byte_increment_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ _NOTE(ARGUNUSED(negate))
+
+ for (index = 0; index < sizeof (efx_qword_t); index++)
+ eqp->eq_u8[index] = offset + index;
+}
+
+static void
+efx_sram_all_the_same_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ if (negate)
+ EFX_SET_QWORD(*eqp);
+ else
+ EFX_ZERO_QWORD(*eqp);
+}
+
+static void
+efx_sram_bit_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x55555555 : 0xaaaaaaaa,
+ EFX_DWORD_1, (negate) ? 0x55555555 : 0xaaaaaaaa);
+}
+
+static void
+efx_sram_byte_alternate_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ _NOTE(ARGUNUSED(row))
+
+ EFX_POPULATE_QWORD_2(*eqp,
+ EFX_DWORD_0, (negate) ? 0x00ff00ff : 0xff00ff00,
+ EFX_DWORD_1, (negate) ? 0x00ff00ff : 0xff00ff00);
+}
+
+static void
+efx_sram_byte_changing_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+ unsigned int index;
+
+ for (index = 0; index < sizeof (efx_qword_t); index++) {
+ uint8_t byte;
+
+ if (offset / 256 == 0)
+ byte = (uint8_t)((offset % 257) % 256);
+ else
+ byte = (uint8_t)(~((offset - 8) % 257) % 256);
+
+ eqp->eq_u8[index] = (negate) ? ~byte : byte;
+ }
+}
+
+static void
+efx_sram_bit_sweep_set(
+ __in size_t row,
+ __in boolean_t negate,
+ __out efx_qword_t *eqp)
+{
+ size_t offset = row * FR_AZ_SRM_DBG_REG_STEP;
+
+ if (negate) {
+ EFX_SET_QWORD(*eqp);
+ EFX_CLEAR_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ } else {
+ EFX_ZERO_QWORD(*eqp);
+ EFX_SET_QWORD_BIT(*eqp, (offset / sizeof (efx_qword_t)) % 64);
+ }
+}
+
+efx_sram_pattern_fn_t __efx_sram_pattern_fns[] = {
+ efx_sram_byte_increment_set,
+ efx_sram_all_the_same_set,
+ efx_sram_bit_alternate_set,
+ efx_sram_byte_alternate_set,
+ efx_sram_byte_changing_set,
+ efx_sram_bit_sweep_set
+};
+
+ __checkReturn efx_rc_t
+efx_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_pattern_type_t type)
+{
+ efx_sram_pattern_fn_t func;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
+
+ /* SRAM testing is only available on Siena. */
+ if (enp->en_family != EFX_FAMILY_SIENA)
+ return (0);
+
+ /* Select pattern generator */
+ EFSYS_ASSERT3U(type, <, EFX_PATTERN_NTYPES);
+ func = __efx_sram_pattern_fns[type];
+
+ return (siena_sram_test(enp, func));
+}
+
+#endif /* EFSYS_OPT_DIAG */
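A brief usage sketch (assumptions, not from the patch): on Falcon/Siena the DMA memory backing a queue must be entered into the buffer table before the queue is created and released again after it is destroyed. buf_tbl_id, esmp and n_entries stand in for the caller's allocations.

    efx_rc_t rc;

    /* Map the queue's DMA memory into the buffer table */
    rc = efx_sram_buf_tbl_set(enp, buf_tbl_id, esmp, n_entries);
    if (rc != 0)
            return (rc);

    /* ... create, use and destroy the queue backed by this memory ... */

    /* Release the buffer table entries */
    efx_sram_buf_tbl_clear(enp, buf_tbl_id, n_entries);
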
diff --git a/drivers/net/sfc/base/efx_tx.c b/drivers/net/sfc/base/efx_tx.c
new file mode 100644
index 00000000..ceb29206
--- /dev/null
+++ b/drivers/net/sfc/base/efx_tx.c
@@ -0,0 +1,1097 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_QSTATS
+#define EFX_TX_QSTAT_INCR(_etp, _stat) \
+ do { \
+ (_etp)->et_stat[_stat]++; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+#else
+#define EFX_TX_QSTAT_INCR(_etp, _stat)
+#endif
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp);
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp);
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp);
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp);
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed);
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns);
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp);
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp);
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp);
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp);
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat);
+#endif
+
+#endif /* EFSYS_OPT_SIENA */
+
+
+#if EFSYS_OPT_SIENA
+static const efx_tx_ops_t __efx_tx_siena_ops = {
+ siena_tx_init, /* etxo_init */
+ siena_tx_fini, /* etxo_fini */
+ siena_tx_qcreate, /* etxo_qcreate */
+ siena_tx_qdestroy, /* etxo_qdestroy */
+ siena_tx_qpost, /* etxo_qpost */
+ siena_tx_qpush, /* etxo_qpush */
+ siena_tx_qpace, /* etxo_qpace */
+ siena_tx_qflush, /* etxo_qflush */
+ siena_tx_qenable, /* etxo_qenable */
+ NULL, /* etxo_qpio_enable */
+ NULL, /* etxo_qpio_disable */
+ NULL, /* etxo_qpio_write */
+ NULL, /* etxo_qpio_post */
+ siena_tx_qdesc_post, /* etxo_qdesc_post */
+ siena_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ NULL, /* etxo_qdesc_tso2_create */
+ NULL, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ siena_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+static const efx_tx_ops_t __efx_tx_hunt_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ ef10_tx_qdesc_tso_create, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+static const efx_tx_ops_t __efx_tx_medford_ops = {
+ ef10_tx_init, /* etxo_init */
+ ef10_tx_fini, /* etxo_fini */
+ ef10_tx_qcreate, /* etxo_qcreate */
+ ef10_tx_qdestroy, /* etxo_qdestroy */
+ ef10_tx_qpost, /* etxo_qpost */
+ ef10_tx_qpush, /* etxo_qpush */
+ ef10_tx_qpace, /* etxo_qpace */
+ ef10_tx_qflush, /* etxo_qflush */
+ ef10_tx_qenable, /* etxo_qenable */
+ ef10_tx_qpio_enable, /* etxo_qpio_enable */
+ ef10_tx_qpio_disable, /* etxo_qpio_disable */
+ ef10_tx_qpio_write, /* etxo_qpio_write */
+ ef10_tx_qpio_post, /* etxo_qpio_post */
+ ef10_tx_qdesc_post, /* etxo_qdesc_post */
+ ef10_tx_qdesc_dma_create, /* etxo_qdesc_dma_create */
+ NULL, /* etxo_qdesc_tso_create */
+ ef10_tx_qdesc_tso2_create, /* etxo_qdesc_tso2_create */
+ ef10_tx_qdesc_vlantci_create, /* etxo_qdesc_vlantci_create */
+#if EFSYS_OPT_QSTATS
+ ef10_tx_qstats_update, /* etxo_qstats_update */
+#endif
+};
+#endif /* EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_tx_init(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+
+ if (!(enp->en_mod_flags & EFX_MOD_EV)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ if (enp->en_mod_flags & EFX_MOD_TX) {
+ rc = EINVAL;
+ goto fail2;
+ }
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ etxop = &__efx_tx_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ etxop = &__efx_tx_hunt_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ etxop = &__efx_tx_medford_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail3;
+ }
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ if ((rc = etxop->etxo_init(enp)) != 0)
+ goto fail4;
+
+ enp->en_etxop = etxop;
+ enp->en_mod_flags |= EFX_MOD_TX;
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+ return (rc);
+}
+
+ void
+efx_tx_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+ EFSYS_ASSERT3U(enp->en_tx_qcount, ==, 0);
+
+ etxop->etxo_fini(enp);
+
+ enp->en_etxop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_TX;
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __deref_out efx_txq_t **etpp,
+ __out unsigned int *addedp)
+{
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_txq_t *etp;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_TX);
+
+ EFSYS_ASSERT3U(enp->en_tx_qcount + 1, <, encp->enc_txq_limit);
+
+	/* Allocate a TXQ object */
+ EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_txq_t), etp);
+
+ if (etp == NULL) {
+ rc = ENOMEM;
+ goto fail1;
+ }
+
+ etp->et_magic = EFX_TXQ_MAGIC;
+ etp->et_enp = enp;
+ etp->et_index = index;
+ etp->et_mask = n - 1;
+ etp->et_esmp = esmp;
+
+ /* Initial descriptor index may be modified by etxo_qcreate */
+ *addedp = 0;
+
+ if ((rc = etxop->etxo_qcreate(enp, index, label, esmp,
+ n, id, flags, eep, etp, addedp)) != 0)
+ goto fail2;
+
+ enp->en_tx_qcount++;
+ *etpp = etp;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ EFSYS_ASSERT(enp->en_tx_qcount != 0);
+ --enp->en_tx_qcount;
+
+ etxop->etxo_qdestroy(etp);
+
+ /* Free the TXQ object */
+ EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_txq_t), etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpost(etp, eb,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qpush(etp, added, pushed);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qpace(etp, ns)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qflush(etp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qenable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_enable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (~enp->en_features & EFX_FEATURE_PIO_BUFFERS) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ if (etxop->etxo_qpio_enable == NULL) {
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ if ((rc = etxop->etxo_qpio_enable(etp)) != 0)
+ goto fail3;
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qpio_disable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_disable != NULL)
+ etxop->etxo_qpio_disable(etp);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_write(
+ __in efx_txq_t *etp,
+ __in_ecount(buf_length) uint8_t *buffer,
+ __in size_t buf_length,
+ __in size_t pio_buf_offset)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_write != NULL) {
+ if ((rc = etxop->etxo_qpio_write(etp, buffer, buf_length,
+ pio_buf_offset)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qpio_post(
+ __in efx_txq_t *etp,
+ __in size_t pkt_length,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if (etxop->etxo_qpio_post != NULL) {
+ if ((rc = etxop->etxo_qpio_post(etp, pkt_length, completed,
+ addedp)) != 0)
+ goto fail1;
+ return (0);
+ }
+
+ return (ENOTSUP);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ if ((rc = etxop->etxo_qdesc_post(etp, ed,
+ n, completed, addedp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+efx_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_dma_create != NULL);
+
+ etxop->etxo_qdesc_dma_create(etp, addr, size, eop, edp);
+}
+
+ void
+efx_tx_qdesc_tso_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint8_t tcp_flags,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso_create != NULL);
+
+ etxop->etxo_qdesc_tso_create(etp, ipv4_id, tcp_seq, tcp_flags, edp);
+}
+
+ void
+efx_tx_qdesc_tso2_create(
+ __in efx_txq_t *etp,
+ __in uint16_t ipv4_id,
+ __in uint32_t tcp_seq,
+ __in uint16_t mss,
+ __out_ecount(count) efx_desc_t *edp,
+ __in int count)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_tso2_create != NULL);
+
+ etxop->etxo_qdesc_tso2_create(etp, ipv4_id, tcp_seq, mss, edp, count);
+}
+
+ void
+efx_tx_qdesc_vlantci_create(
+ __in efx_txq_t *etp,
+ __in uint16_t tci,
+ __out efx_desc_t *edp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+ EFSYS_ASSERT(etxop->etxo_qdesc_vlantci_create != NULL);
+
+ etxop->etxo_qdesc_vlantci_create(etp, tci, edp);
+}
+
+
+#if EFSYS_OPT_QSTATS
+ void
+efx_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ efx_nic_t *enp = etp->et_enp;
+ const efx_tx_ops_t *etxop = enp->en_etxop;
+
+ EFSYS_ASSERT3U(etp->et_magic, ==, EFX_TXQ_MAGIC);
+
+ etxop->etxo_qstats_update(etp, stat);
+}
+#endif
+
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_tx_init(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * Disable the timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level (although always allow a
+ * minimal trickle).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PUSH_EN, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+
+ /*
+ * Filter all packets less than 14 bytes to avoid parsing
+ * errors.
+ */
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_RESERVED_REG, &oword);
+
+ /*
+ * Do not set TX_NO_EOP_DISC_EN, since it limits packets to 16
+ * descriptors (which is bad).
+ */
+ EFX_BAR_READO(enp, FR_AZ_TX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_CFG_REG, &oword);
+
+ return (0);
+}
+
+#define EFX_TX_DESC(_etp, _addr, _size, _eop, _added) \
+ do { \
+ unsigned int id; \
+ size_t offset; \
+ efx_qword_t qword; \
+ \
+ id = (_added)++ & (_etp)->et_mask; \
+ offset = id * sizeof (efx_qword_t); \
+ \
+ EFSYS_PROBE5(tx_post, unsigned int, (_etp)->et_index, \
+ unsigned int, id, efsys_dma_addr_t, (_addr), \
+ size_t, (_size), boolean_t, (_eop)); \
+ \
+ EFX_POPULATE_QWORD_4(qword, \
+ FSF_AZ_TX_KER_CONT, (_eop) ? 0 : 1, \
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)(_size), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW0, \
+ (uint32_t)((_addr) & 0xffffffff), \
+ FSF_AZ_TX_KER_BUF_ADDR_DW1, \
+ (uint32_t)((_addr) >> 32)); \
+ EFSYS_MEM_WRITEQ((_etp)->et_esmp, offset, &qword); \
+ \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+static __checkReturn efx_rc_t
+siena_tx_qpost(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_buffer_t *eb,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ int rc = ENOSPC;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1))
+ goto fail1;
+
+ for (i = 0; i < n; i++) {
+ efx_buffer_t *ebp = &eb[i];
+ efsys_dma_addr_t start = ebp->eb_addr;
+ size_t size = ebp->eb_size;
+ efsys_dma_addr_t end = start + size;
+
+ /*
+ * Fragments must not span 4k boundaries.
+ * This is a stricter requirement than the maximum length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(start + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= end);
+
+ EFX_TX_DESC(etp, start, size, ebp->eb_eop, added);
+ }
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static void
+siena_tx_qpush(
+ __in efx_txq_t *etp,
+ __in unsigned int added,
+ __in unsigned int pushed)
+{
+ efx_nic_t *enp = etp->et_enp;
+ uint32_t wptr;
+ efx_dword_t dword;
+ efx_oword_t oword;
+
+ /* Push the populated descriptors out */
+ wptr = added & etp->et_mask;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DESC_WPTR, wptr);
+
+ /* Only write the third DWORD */
+ EFX_POPULATE_DWORD_1(dword,
+ EFX_DWORD_0, EFX_OWORD_FIELD(oword, EFX_DWORD_3));
+
+ /* Guarantee ordering of memory (descriptors) and PIO (doorbell) */
+ EFX_DMA_SYNC_QUEUE_FOR_DEVICE(etp->et_esmp, etp->et_mask + 1,
+ wptr, pushed & etp->et_mask);
+ EFSYS_PIO_WRITE_BARRIER();
+ EFX_BAR_TBL_WRITED3(enp, FR_BZ_TX_DESC_UPD_REGP0,
+ etp->et_index, &dword, B_FALSE);
+}
+
+#define EFX_MAX_PACE_VALUE 20
+
+static __checkReturn efx_rc_t
+siena_tx_qpace(
+ __in efx_txq_t *etp,
+ __in unsigned int ns)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ unsigned int pace_val;
+ unsigned int timer_period;
+ efx_rc_t rc;
+
+ if (ns == 0) {
+ pace_val = 0;
+ } else {
+ /*
+ * The pace_val to write into the table is chosen such that
+ * ns <= timer_period * (2 ^ pace_val)
+ */
+ timer_period = 104 / encp->enc_clk_mult;
+ for (pace_val = 1; pace_val <= EFX_MAX_PACE_VALUE; pace_val++) {
+ if ((timer_period << pace_val) >= ns)
+ break;
+ }
+ }
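+ /*
+ * Worked example (assuming enc_clk_mult == 1, so timer_period == 104):
+ * for ns == 500 the loop stops at pace_val == 3, since
+ * 104 << 2 == 416 < 500 but 104 << 3 == 832 >= 500.
+ */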
+ if (pace_val > EFX_MAX_PACE_VALUE) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Update the pacing table */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_PACE, pace_val);
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_PACE_TBL, etp->et_index,
+ &oword, B_TRUE);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qflush(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+ uint32_t label;
+
+ efx_tx_qpace(etp, 0);
+
+ label = etp->et_index;
+
+ /* Flush the queue */
+ EFX_POPULATE_OWORD_2(oword, FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, label);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_FLUSH_DESCQ_REG, &oword);
+
+ return (0);
+}
+
+static void
+siena_tx_qenable(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ EFX_BAR_TBL_READO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ EFSYS_PROBE5(tx_descq_ptr, unsigned int, etp->et_index,
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_3),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_2),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_1),
+ uint32_t, EFX_OWORD_FIELD(oword, EFX_DWORD_0));
+
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DC_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_HW_RPTR, 0);
+ EFX_SET_OWORD_FIELD(oword, FRF_AZ_TX_DESCQ_EN, 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static __checkReturn efx_rc_t
+siena_tx_qcreate(
+ __in efx_nic_t *enp,
+ __in unsigned int index,
+ __in unsigned int label,
+ __in efsys_mem_t *esmp,
+ __in size_t n,
+ __in uint32_t id,
+ __in uint16_t flags,
+ __in efx_evq_t *eep,
+ __in efx_txq_t *etp,
+ __out unsigned int *addedp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t size;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(esmp))
+
+ EFX_STATIC_ASSERT(EFX_EV_TX_NLABELS ==
+ (1 << FRF_AZ_TX_DESCQ_LABEL_WIDTH));
+ EFSYS_ASSERT3U(label, <, EFX_EV_TX_NLABELS);
+
+ EFSYS_ASSERT(ISP2(encp->enc_txq_max_ndescs));
+ EFX_STATIC_ASSERT(ISP2(EFX_TXQ_MINNDESCS));
+
+ if (!ISP2(n) || (n < EFX_TXQ_MINNDESCS) || (n > EFX_EVQ_MAXNEVS)) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ if (index >= encp->enc_txq_limit) {
+ rc = EINVAL;
+ goto fail2;
+ }
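+ /*
+ * Encode the ring size for the TX_DESCQ_SIZE field as
+ * log2(n / EFX_TXQ_MINNDESCS); n has already been checked above to be a
+ * power of two no smaller than EFX_TXQ_MINNDESCS.
+ */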
+ for (size = 0;
+ (1 << size) <= (int)(encp->enc_txq_max_ndescs / EFX_TXQ_MINNDESCS);
+ size++)
+ if ((1 << size) == (int)(n / EFX_TXQ_MINNDESCS))
+ break;
+ if (id + (1 << size) >= encp->enc_buftbl_limit) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ /* Set up the new descriptor queue */
+ *addedp = 0;
+
+ EFX_POPULATE_OWORD_6(oword,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, id,
+ FRF_AZ_TX_DESCQ_EVQ_ID, eep->ee_index,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, label,
+ FRF_AZ_TX_DESCQ_SIZE, size,
+ FRF_AZ_TX_DESCQ_TYPE, 0);
+
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_IP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_TX_TCP_CHKSM_DIS,
+ (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_tx_qdesc_post(
+ __in efx_txq_t *etp,
+ __in_ecount(n) efx_desc_t *ed,
+ __in unsigned int n,
+ __in unsigned int completed,
+ __inout unsigned int *addedp)
+{
+ unsigned int added = *addedp;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if (added - completed + n > EFX_TXQ_LIMIT(etp->et_mask + 1)) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ for (i = 0; i < n; i++) {
+ efx_desc_t *edp = &ed[i];
+ unsigned int id;
+ size_t offset;
+
+ id = added++ & etp->et_mask;
+ offset = id * sizeof (efx_desc_t);
+
+ EFSYS_MEM_WRITEQ(etp->et_esmp, offset, &edp->ed_eq);
+ }
+
+ EFSYS_PROBE3(tx_desc_post, unsigned int, etp->et_index,
+ unsigned int, added, unsigned int, n);
+
+ EFX_TX_QSTAT_INCR(etp, TX_POST);
+
+ *addedp = added;
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+ return (rc);
+}
+
+ void
+siena_tx_qdesc_dma_create(
+ __in efx_txq_t *etp,
+ __in efsys_dma_addr_t addr,
+ __in size_t size,
+ __in boolean_t eop,
+ __out efx_desc_t *edp)
+{
+ /*
+ * Fragments must not span 4k boundaries.
+ * This is a stricter requirement than the maximum length.
+ */
+ EFSYS_ASSERT(P2ROUNDUP(addr + 1,
+ etp->et_enp->en_nic_cfg.enc_tx_dma_desc_boundary) >= addr + size);
+
+ EFSYS_PROBE4(tx_desc_dma_create, unsigned int, etp->et_index,
+ efsys_dma_addr_t, addr,
+ size_t, size, boolean_t, eop);
+
+ EFX_POPULATE_QWORD_4(edp->ed_eq,
+ FSF_AZ_TX_KER_CONT, eop ? 0 : 1,
+ FSF_AZ_TX_KER_BYTE_COUNT, (uint32_t)size,
+ FSF_AZ_TX_KER_BUF_ADDR_DW0,
+ (uint32_t)(addr & 0xffffffff),
+ FSF_AZ_TX_KER_BUF_ADDR_DW1,
+ (uint32_t)(addr >> 32));
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_QSTATS
+#if EFSYS_OPT_NAMES
+/* START MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock 2866874ecd7a363b */
+static const char * const __efx_tx_qstat_name[] = {
+ "post",
+ "post_pio",
+};
+/* END MKCONFIG GENERATED EfxTransmitQueueStatNamesBlock */
+
+ const char *
+efx_tx_qstat_name(
+ __in efx_nic_t *enp,
+ __in unsigned int id)
+{
+ _NOTE(ARGUNUSED(enp))
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(id, <, TX_NQSTATS);
+
+ return (__efx_tx_qstat_name[id]);
+}
+#endif /* EFSYS_OPT_NAMES */
+#endif /* EFSYS_OPT_QSTATS */
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_QSTATS
+static void
+siena_tx_qstats_update(
+ __in efx_txq_t *etp,
+ __inout_ecount(TX_NQSTATS) efsys_stat_t *stat)
+{
+ unsigned int id;
+
+ for (id = 0; id < TX_NQSTATS; id++) {
+ efsys_stat_t *essp = &stat[id];
+
+ EFSYS_STAT_INCR(essp, etp->et_stat[id]);
+ etp->et_stat[id] = 0;
+ }
+}
+#endif /* EFSYS_OPT_QSTATS */
+
+static void
+siena_tx_qdestroy(
+ __in efx_txq_t *etp)
+{
+ efx_nic_t *enp = etp->et_enp;
+ efx_oword_t oword;
+
+ /* Purge descriptor queue */
+ EFX_ZERO_OWORD(oword);
+
+ EFX_BAR_TBL_WRITEO(enp, FR_AZ_TX_DESC_PTR_TBL,
+ etp->et_index, &oword, B_TRUE);
+}
+
+static void
+siena_tx_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/efx_types.h b/drivers/net/sfc/base/efx_types.h
new file mode 100644
index 00000000..b8ee14a6
--- /dev/null
+++ b/drivers/net/sfc/base/efx_types.h
@@ -0,0 +1,1647 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ *
+ * Acknowledgement to Fen Systems Ltd.
+ */
+
+#ifndef _SYS_EFX_TYPES_H
+#define _SYS_EFX_TYPES_H
+
+#include "efsys.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Bitfield access
+ *
+ * Solarflare NICs make extensive use of bitfields up to 128 bits
+ * wide. Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian. Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and efx_dword_t)
+ * to be little-endian.
+ *
+ * In the less common case of using PIO for individual register
+ * writes, we construct the little-endian datatype in host memory and
+ * then use non-swapping register access primitives, rather than
+ * constructing a native-endian datatype and relying on implicit
+ * byte-swapping. (We use a similar strategy for register reads.)
+ */
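+
+/*
+ * Minimal usage sketch (illustrative only; the helper macros referenced
+ * here are defined further down in this header):
+ *
+ *	efx_dword_t reg;
+ *	uint32_t val;
+ *
+ *	EFX_POPULATE_DWORD_1(reg, EFX_DWORD_0, 0x12345678);
+ *	val = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+ *
+ * The in-memory representation of "reg" is little-endian regardless of the
+ * host byte order, so it can be DMAed or written to the NIC unchanged.
+ */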
+
+/*
+ * NOTE: Field definitions here and elsewhere are done in terms of a lowest
+ * bit number (LBN) and a width.
+ */
+
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+
+#define EFX_BYTE_0_LBN 0
+#define EFX_BYTE_0_WIDTH 8
+
+#define EFX_BYTE_1_LBN 8
+#define EFX_BYTE_1_WIDTH 8
+
+#define EFX_BYTE_2_LBN 16
+#define EFX_BYTE_2_WIDTH 8
+
+#define EFX_BYTE_3_LBN 24
+#define EFX_BYTE_3_WIDTH 8
+
+#define EFX_BYTE_4_LBN 32
+#define EFX_BYTE_4_WIDTH 8
+
+#define EFX_BYTE_5_LBN 40
+#define EFX_BYTE_5_WIDTH 8
+
+#define EFX_BYTE_6_LBN 48
+#define EFX_BYTE_6_WIDTH 8
+
+#define EFX_BYTE_7_LBN 56
+#define EFX_BYTE_7_WIDTH 8
+
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+
+#define EFX_WORD_2_LBN 32
+#define EFX_WORD_2_WIDTH 16
+
+#define EFX_WORD_3_LBN 48
+#define EFX_WORD_3_WIDTH 16
+
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+/* There are intentionally no EFX_QWORD_0 or EFX_QWORD_1 field definitions
+ * here as the implementations of EFX_QWORD_FIELD and EFX_OWORD_FIELD do not
+ * support field widths larger than 32 bits.
+ */
+
+/* Specified attribute (i.e. LBN or WIDTH) of the specified field */
+#define EFX_VAL(_field, _attribute) \
+ _field ## _ ## _attribute
+
+/* Lowest bit number of the specified field */
+#define EFX_LOW_BIT(_field) \
+ EFX_VAL(_field, LBN)
+
+/* Width of the specified field */
+#define EFX_WIDTH(_field) \
+ EFX_VAL(_field, WIDTH)
+
+/* Highest bit number of the specified field */
+#define EFX_HIGH_BIT(_field) \
+ (EFX_LOW_BIT(_field) + EFX_WIDTH(_field) - 1)
+
+/*
+ * 64-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x000000000000001f.
+ */
+#define EFX_MASK64(_field) \
+ ((EFX_WIDTH(_field) == 64) ? ~((uint64_t)0) : \
+ (((((uint64_t)1) << EFX_WIDTH(_field))) - 1))
+/*
+ * 32-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x0000001f.
+ */
+#define EFX_MASK32(_field) \
+ ((EFX_WIDTH(_field) == 32) ? ~((uint32_t)0) : \
+ (((((uint32_t)1) << EFX_WIDTH(_field))) - 1))
+
+/*
+ * 16-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x001f.
+ */
+#define EFX_MASK16(_field) \
+ ((EFX_WIDTH(_field) == 16) ? 0xffffu : \
+ (uint16_t)((1 << EFX_WIDTH(_field)) - 1))
+
+/*
+ * 8-bit mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ */
+#define EFX_MASK8(_field) \
+ ((uint8_t)((1 << EFX_WIDTH(_field)) - 1))
+
+#pragma pack(1)
+
+/*
+ * A byte (i.e. 8-bit) datatype
+ */
+typedef union efx_byte_u {
+ uint8_t eb_u8[1];
+} efx_byte_t;
+
+/*
+ * A word (i.e. 16-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_word_u {
+ efx_byte_t ew_byte[2];
+ uint16_t ew_u16[1];
+ uint8_t ew_u8[2];
+} efx_word_t;
+
+/*
+ * A doubleword (i.e. 32-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_dword_u {
+ efx_byte_t ed_byte[4];
+ efx_word_t ed_word[2];
+ uint32_t ed_u32[1];
+ uint16_t ed_u16[2];
+ uint8_t ed_u8[4];
+} efx_dword_t;
+
+/*
+ * A quadword (i.e. 64-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_qword_u {
+ efx_byte_t eq_byte[8];
+ efx_word_t eq_word[4];
+ efx_dword_t eq_dword[2];
+#if EFSYS_HAS_UINT64
+ uint64_t eq_u64[1];
+#endif
+ uint32_t eq_u32[2];
+ uint16_t eq_u16[4];
+ uint8_t eq_u8[8];
+} efx_qword_t;
+
+/*
+ * An octword (i.e. 128-bit) datatype
+ *
+ * This datatype is defined to be little-endian.
+ */
+typedef union efx_oword_u {
+ efx_byte_t eo_byte[16];
+ efx_word_t eo_word[8];
+ efx_dword_t eo_dword[4];
+ efx_qword_t eo_qword[2];
+#if EFSYS_HAS_SSE2_M128
+ __m128i eo_u128[1];
+#endif
+#if EFSYS_HAS_UINT64
+ uint64_t eo_u64[2];
+#endif
+ uint32_t eo_u32[4];
+ uint16_t eo_u16[8];
+ uint8_t eo_u8[16];
+} efx_oword_t;
+
+#pragma pack()
+
+#define __SWAP16(_x) \
+ ((((_x) & 0xff) << 8) | \
+ (((_x) >> 8) & 0xff))
+
+#define __SWAP32(_x) \
+ ((__SWAP16((_x) & 0xffff) << 16) | \
+ __SWAP16(((_x) >> 16) & 0xffff))
+
+#define __SWAP64(_x) \
+ ((__SWAP32((_x) & 0xffffffff) << 32) | \
+ __SWAP32(((_x) >> 32) & 0xffffffff))
+
+#define __NOSWAP16(_x) (_x)
+#define __NOSWAP32(_x) (_x)
+#define __NOSWAP64(_x) (_x)
+
+#if EFSYS_IS_BIG_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+
+#elif EFSYS_IS_LITTLE_ENDIAN
+
+#define __CPU_TO_LE_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __LE_TO_CPU_16(_x) ((uint16_t)__NOSWAP16(_x))
+#define __CPU_TO_BE_16(_x) ((uint16_t)__SWAP16(_x))
+#define __BE_TO_CPU_16(_x) ((uint16_t)__SWAP16(_x))
+
+#define __CPU_TO_LE_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __LE_TO_CPU_32(_x) ((uint32_t)__NOSWAP32(_x))
+#define __CPU_TO_BE_32(_x) ((uint32_t)__SWAP32(_x))
+#define __BE_TO_CPU_32(_x) ((uint32_t)__SWAP32(_x))
+
+#define __CPU_TO_LE_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __LE_TO_CPU_64(_x) ((uint64_t)__NOSWAP64(_x))
+#define __CPU_TO_BE_64(_x) ((uint64_t)__SWAP64(_x))
+#define __BE_TO_CPU_64(_x) ((uint64_t)__SWAP64(_x))
+
+#else
+
+#error "Neither of EFSYS_IS_{BIG,LITTLE}_ENDIAN is set"
+
+#endif
+
+#define __NATIVE_8(_x) (uint8_t)(_x)
+
+/* Format string for printing an efx_byte_t */
+#define EFX_BYTE_FMT "0x%02x"
+
+/* Format string for printing an efx_word_t */
+#define EFX_WORD_FMT "0x%04x"
+
+/* Format string for printing an efx_dword_t */
+#define EFX_DWORD_FMT "0x%08x"
+
+/* Format string for printing an efx_qword_t */
+#define EFX_QWORD_FMT "0x%08x:%08x"
+
+/* Format string for printing an efx_oword_t */
+#define EFX_OWORD_FMT "0x%08x:%08x:%08x:%08x"
+
+/* Parameters for printing an efx_byte_t */
+#define EFX_BYTE_VAL(_byte) \
+ ((unsigned int)__NATIVE_8((_byte).eb_u8[0]))
+
+/* Parameters for printing an efx_word_t */
+#define EFX_WORD_VAL(_word) \
+ ((unsigned int)__LE_TO_CPU_16((_word).ew_u16[0]))
+
+/* Parameters for printing an efx_dword_t */
+#define EFX_DWORD_VAL(_dword) \
+ ((unsigned int)__LE_TO_CPU_32((_dword).ed_u32[0]))
+
+/* Parameters for printing an efx_qword_t */
+#define EFX_QWORD_VAL(_qword) \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_qword).eq_u32[0]))
+
+/* Parameters for printing an efx_oword_t */
+#define EFX_OWORD_VAL(_oword) \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[3])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[2])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[1])), \
+ ((unsigned int)__LE_TO_CPU_32((_oword).eo_u32[0]))
+
+/*
+ * Stop lint complaining about some shifts.
+ */
+#ifdef __lint
+extern int fix_lint;
+#define FIX_LINT(_x) (_x + fix_lint)
+#else
+#define FIX_LINT(_x) (_x)
+#endif
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT(_element, 32, 63, 28, 45) would give
+ *
+ * (_element) << 4
+ *
+ * The result will contain the relevant bits filled in the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(_element, _min, _max, _low, _high) \
+ ((FIX_LINT(_low > _max) || FIX_LINT(_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ ((_element) >> (_low - _min)) : \
+ ((_element) << (_min - _low))))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_64(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_32(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 16-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT16(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__LE_TO_CPU_16(_element), _min, _max, _low, _high)
+
+/*
+ * Extract bit field portion [low,high) from the 8-bit
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT8(_element, _min, _max, _low, _high) \
+ EFX_EXTRACT_NATIVE(__NATIVE_8(_element), _min, _max, _low, _high)
+
+#define EFX_EXTRACT_OWORD64(_oword, _low, _high) \
+ (EFX_EXTRACT64((_oword).eo_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT64((_oword).eo_u64[1], FIX_LINT(64), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_OWORD32(_oword, _low, _high) \
+ (EFX_EXTRACT32((_oword).eo_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[2], FIX_LINT(64), FIX_LINT(95), \
+ _low, _high) | \
+ EFX_EXTRACT32((_oword).eo_u32[3], FIX_LINT(96), FIX_LINT(127), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD64(_qword, _low, _high) \
+ (EFX_EXTRACT64((_qword).eq_u64[0], FIX_LINT(0), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_QWORD32(_qword, _low, _high) \
+ (EFX_EXTRACT32((_qword).eq_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high) | \
+ EFX_EXTRACT32((_qword).eq_u32[1], FIX_LINT(32), FIX_LINT(63), \
+ _low, _high))
+
+#define EFX_EXTRACT_DWORD(_dword, _low, _high) \
+ (EFX_EXTRACT32((_dword).ed_u32[0], FIX_LINT(0), FIX_LINT(31), \
+ _low, _high))
+
+#define EFX_EXTRACT_WORD(_word, _low, _high) \
+ (EFX_EXTRACT16((_word).ew_u16[0], FIX_LINT(0), FIX_LINT(15), \
+ _low, _high))
+
+#define EFX_EXTRACT_BYTE(_byte, _low, _high) \
+ (EFX_EXTRACT8((_byte).eb_u8[0], FIX_LINT(0), FIX_LINT(7), \
+ _low, _high))
+
+
+#define EFX_OWORD_FIELD64(_oword, _field) \
+ ((uint32_t)EFX_EXTRACT_OWORD64(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_OWORD_FIELD32(_oword, _field) \
+ (EFX_EXTRACT_OWORD32(_oword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD64(_qword, _field) \
+ ((uint32_t)EFX_EXTRACT_QWORD64(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_QWORD_FIELD32(_qword, _field) \
+ (EFX_EXTRACT_QWORD32(_qword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_DWORD_FIELD(_dword, _field) \
+ (EFX_EXTRACT_DWORD(_dword, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK32(_field))
+
+#define EFX_WORD_FIELD(_word, _field) \
+ (EFX_EXTRACT_WORD(_word, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK16(_field))
+
+#define EFX_BYTE_FIELD(_byte, _field) \
+ (EFX_EXTRACT_BYTE(_byte, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field)) & EFX_MASK8(_field))
+
+
+#define EFX_OWORD_IS_EQUAL64(_oword_a, _oword_b) \
+ ((_oword_a).eo_u64[0] == (_oword_b).eo_u64[0] && \
+ (_oword_a).eo_u64[1] == (_oword_b).eo_u64[1])
+
+#define EFX_OWORD_IS_EQUAL32(_oword_a, _oword_b) \
+ ((_oword_a).eo_u32[0] == (_oword_b).eo_u32[0] && \
+ (_oword_a).eo_u32[1] == (_oword_b).eo_u32[1] && \
+ (_oword_a).eo_u32[2] == (_oword_b).eo_u32[2] && \
+ (_oword_a).eo_u32[3] == (_oword_b).eo_u32[3])
+
+#define EFX_QWORD_IS_EQUAL64(_qword_a, _qword_b) \
+ ((_qword_a).eq_u64[0] == (_qword_b).eq_u64[0])
+
+#define EFX_QWORD_IS_EQUAL32(_qword_a, _qword_b) \
+ ((_qword_a).eq_u32[0] == (_qword_b).eq_u32[0] && \
+ (_qword_a).eq_u32[1] == (_qword_b).eq_u32[1])
+
+#define EFX_DWORD_IS_EQUAL(_dword_a, _dword_b) \
+ ((_dword_a).ed_u32[0] == (_dword_b).ed_u32[0])
+
+#define EFX_WORD_IS_EQUAL(_word_a, _word_b) \
+ ((_word_a).ew_u16[0] == (_word_b).ew_u16[0])
+
+#define EFX_BYTE_IS_EQUAL(_byte_a, _byte_b) \
+ ((_byte_a).eb_u8[0] == (_byte_b).eb_u8[0])
+
+
+#define EFX_OWORD_IS_ZERO64(_oword) \
+ (((_oword).eo_u64[0] | \
+ (_oword).eo_u64[1]) == 0)
+
+#define EFX_OWORD_IS_ZERO32(_oword) \
+ (((_oword).eo_u32[0] | \
+ (_oword).eo_u32[1] | \
+ (_oword).eo_u32[2] | \
+ (_oword).eo_u32[3]) == 0)
+
+#define EFX_QWORD_IS_ZERO64(_qword) \
+ (((_qword).eq_u64[0]) == 0)
+
+#define EFX_QWORD_IS_ZERO32(_qword) \
+ (((_qword).eq_u32[0] | \
+ (_qword).eq_u32[1]) == 0)
+
+#define EFX_DWORD_IS_ZERO(_dword) \
+ (((_dword).ed_u32[0]) == 0)
+
+#define EFX_WORD_IS_ZERO(_word) \
+ (((_word).ew_u16[0]) == 0)
+
+#define EFX_BYTE_IS_ZERO(_byte) \
+ (((_byte).eb_u8[0]) == 0)
+
+
+#define EFX_OWORD_IS_SET64(_oword) \
+ (((_oword).eo_u64[0] & \
+ (_oword).eo_u64[1]) == ~((uint64_t)0))
+
+#define EFX_OWORD_IS_SET32(_oword) \
+ (((_oword).eo_u32[0] & \
+ (_oword).eo_u32[1] & \
+ (_oword).eo_u32[2] & \
+ (_oword).eo_u32[3]) == ~((uint32_t)0))
+
+#define EFX_QWORD_IS_SET64(_qword) \
+ (((_qword).eq_u64[0]) == ~((uint64_t)0))
+
+#define EFX_QWORD_IS_SET32(_qword) \
+ (((_qword).eq_u32[0] & \
+ (_qword).eq_u32[1]) == ~((uint32_t)0))
+
+#define EFX_DWORD_IS_SET(_dword) \
+ ((_dword).ed_u32[0] == ~((uint32_t)0))
+
+#define EFX_WORD_IS_SET(_word) \
+ ((_word).ew_u16[0] == ~((uint16_t)0))
+
+#define EFX_BYTE_IS_SET(_byte) \
+ ((_byte).eb_u8[0] == ~((uint8_t)0))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+
+#define EFX_INSERT_NATIVE64(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint64_t)(_value)) << (_low - _min)) : \
+ (((uint64_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE32(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ ((_low > _min) ? \
+ (((uint32_t)(_value)) << (_low - _min)) : \
+ (((uint32_t)(_value)) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE16(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint16_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
+
+#define EFX_INSERT_NATIVE8(_min, _max, _low, _high, _value) \
+ (((_low > _max) || (_high < _min)) ? \
+ 0U : \
+ (uint8_t)((_low > _min) ? \
+ ((_value) << (_low - _min)) : \
+ ((_value) >> (_min - _low))))
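+
+/*
+ * Example (illustrative values): inserting 0x3 into a field with LBN 34 and
+ * WIDTH 2 of an oword built from 32-bit elements gives
+ * EFX_INSERT_NATIVE32(32, 63, 34, 35, 0x3) == (0x3 << 2) for element 1 and
+ * 0 for the other elements, since the field lies wholly within bits [32,63].
+ */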
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE64(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE32(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE16(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+#define EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value) \
+ EFX_INSERT_NATIVE8(_min, _max, EFX_LOW_BIT(_field), \
+ EFX_HIGH_BIT(_field), _value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS64(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_64( \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE64(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS32(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_32( \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE32(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS16(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __CPU_TO_LE_16( \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE16(_min, _max, _field10, _value10))
+
+#define EFX_INSERT_FIELDS8(_min, _max, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ __NATIVE_8( \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field1, _value1) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field2, _value2) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field3, _value3) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field4, _value4) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field5, _value5) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field6, _value6) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field7, _value7) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field8, _value8) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field9, _value9) | \
+ EFX_INSERT_FIELD_NATIVE8(_min, _max, _field10, _value10))
+
+#define EFX_POPULATE_OWORD64(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = EFX_INSERT_FIELDS64(64, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_OWORD32(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = EFX_INSERT_FIELDS32(64, 95, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = EFX_INSERT_FIELDS32(96, 127, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD64(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = EFX_INSERT_FIELDS64(0, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_QWORD32(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = EFX_INSERT_FIELDS32(32, 63, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_DWORD(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = EFX_INSERT_FIELDS32(0, 31, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_WORD(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = EFX_INSERT_FIELDS16(0, 15, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_POPULATE_BYTE(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9, \
+ _field10, _value10) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = EFX_INSERT_FIELDS8(0, 7, \
+ _field1, _value1, _field2, _value2, \
+ _field3, _value3, _field4, _value4, \
+ _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, \
+ _field9, _value9, _field10, _value10); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+
+#define EFX_POPULATE_OWORD_9(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_OWORD_10(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_OWORD_8(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_OWORD_9(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_OWORD_7(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_OWORD_8(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_OWORD_6(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_OWORD_7(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_OWORD_5(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_OWORD_6(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_OWORD_4(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_OWORD_5(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_OWORD_3(_oword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_OWORD_4(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_OWORD_2(_oword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_OWORD_3(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_OWORD_1(_oword, \
+ _field1, _value1) \
+ EFX_POPULATE_OWORD_2(_oword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_OWORD(_oword) \
+ EFX_POPULATE_OWORD_1(_oword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_OWORD(_oword) \
+ EFX_POPULATE_OWORD_4(_oword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff, \
+ EFX_DWORD_2, 0xffffffff, EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+
+#define EFX_POPULATE_QWORD_9(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_QWORD_10(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_QWORD_8(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_QWORD_9(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_QWORD_7(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_QWORD_8(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_QWORD_6(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_QWORD_7(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_QWORD_5(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_QWORD_6(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_QWORD_4(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_QWORD_5(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_QWORD_3(_qword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_QWORD_4(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_QWORD_2(_qword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_QWORD_3(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_QWORD_1(_qword, \
+ _field1, _value1) \
+ EFX_POPULATE_QWORD_2(_qword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_QWORD(_qword) \
+ EFX_POPULATE_QWORD_1(_qword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_QWORD(_qword) \
+ EFX_POPULATE_QWORD_2(_qword, \
+ EFX_DWORD_0, 0xffffffff, EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+
+#define EFX_POPULATE_DWORD_9(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_DWORD_10(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_DWORD_8(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_DWORD_9(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_DWORD_7(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_DWORD_8(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_DWORD_6(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_DWORD_7(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_DWORD_5(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_DWORD_6(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_DWORD_4(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_DWORD_5(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_DWORD_3(_dword, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_DWORD_4(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_DWORD_2(_dword, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_DWORD_3(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_DWORD_1(_dword, \
+ _field1, _value1) \
+ EFX_POPULATE_DWORD_2(_dword, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_DWORD(_dword) \
+ EFX_POPULATE_DWORD_1(_dword, \
+ EFX_DWORD_0, 0xffffffff)
+
+/* Populate a word field with various numbers of arguments */
+#define EFX_POPULATE_WORD_10 EFX_POPULATE_WORD
+
+#define EFX_POPULATE_WORD_9(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_WORD_10(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_WORD_8(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_WORD_9(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_WORD_7(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_WORD_8(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_WORD_6(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_WORD_7(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_WORD_5(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_WORD_6(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_WORD_4(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_WORD_5(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_WORD_3(_word, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_WORD_4(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_WORD_2(_word, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_WORD_3(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_WORD_1(_word, \
+ _field1, _value1) \
+ EFX_POPULATE_WORD_2(_word, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_WORD(_word) \
+ EFX_POPULATE_WORD_1(_word, \
+ EFX_WORD_0, 0xffff)
+
+/* Populate a byte field with various numbers of arguments */
+#define EFX_POPULATE_BYTE_10 EFX_POPULATE_BYTE
+
+#define EFX_POPULATE_BYTE_9(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9) \
+ EFX_POPULATE_BYTE_10(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8, _field9, _value9)
+
+#define EFX_POPULATE_BYTE_8(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8) \
+ EFX_POPULATE_BYTE_9(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7, _field8, _value8)
+
+#define EFX_POPULATE_BYTE_7(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7) \
+ EFX_POPULATE_BYTE_8(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6, \
+ _field7, _value7)
+
+#define EFX_POPULATE_BYTE_6(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6) \
+ EFX_POPULATE_BYTE_7(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5, _field6, _value6)
+
+#define EFX_POPULATE_BYTE_5(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5) \
+ EFX_POPULATE_BYTE_6(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4, _field5, _value5)
+
+#define EFX_POPULATE_BYTE_4(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4) \
+ EFX_POPULATE_BYTE_5(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3, \
+ _field4, _value4)
+
+#define EFX_POPULATE_BYTE_3(_byte, \
+ _field1, _value1, _field2, _value2, _field3, _value3) \
+ EFX_POPULATE_BYTE_4(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2, _field3, _value3)
+
+#define EFX_POPULATE_BYTE_2(_byte, \
+ _field1, _value1, _field2, _value2) \
+ EFX_POPULATE_BYTE_3(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1, _field2, _value2)
+
+#define EFX_POPULATE_BYTE_1(_byte, \
+ _field1, _value1) \
+ EFX_POPULATE_BYTE_2(_byte, EFX_DUMMY_FIELD, 0, \
+ _field1, _value1)
+
+#define EFX_ZERO_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, EFX_DUMMY_FIELD, 0)
+
+#define EFX_SET_BYTE(_byte) \
+ EFX_POPULATE_BYTE_1(_byte, \
+ EFX_BYTE_0, 0xff)
+
+/*
+ * Modify a named field within an already-populated structure. Used
+ * for read-modify-write operations.
+ */
+
+#define EFX_INSERT_FIELD64(_min, _max, _field, _value) \
+ __CPU_TO_LE_64(EFX_INSERT_FIELD_NATIVE64(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD32(_min, _max, _field, _value) \
+ __CPU_TO_LE_32(EFX_INSERT_FIELD_NATIVE32(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD16(_min, _max, _field, _value) \
+ __CPU_TO_LE_16(EFX_INSERT_FIELD_NATIVE16(_min, _max, _field, _value))
+
+#define EFX_INSERT_FIELD8(_min, _max, _field, _value) \
+ __NATIVE_8(EFX_INSERT_FIELD_NATIVE8(_min, _max, _field, _value))
+
+#define EFX_INPLACE_MASK64(_min, _max, _field) \
+ EFX_INSERT_FIELD64(_min, _max, _field, EFX_MASK64(_field))
+
+#define EFX_INPLACE_MASK32(_min, _max, _field) \
+ EFX_INSERT_FIELD32(_min, _max, _field, EFX_MASK32(_field))
+
+#define EFX_INPLACE_MASK16(_min, _max, _field) \
+ EFX_INSERT_FIELD16(_min, _max, _field, EFX_MASK16(_field))
+
+#define EFX_INPLACE_MASK8(_min, _max, _field) \
+ EFX_INSERT_FIELD8(_min, _max, _field, EFX_MASK8(_field))
+
+#define EFX_SET_OWORD_FIELD64(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] = (((_oword).eo_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[1] = (((_oword).eo_u64[1] & \
+ ~EFX_INPLACE_MASK64(64, 127, _field)) | \
+ EFX_INSERT_FIELD64(64, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_FIELD32(_oword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] = (((_oword).eo_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[1] = (((_oword).eo_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[2] = (((_oword).eo_u32[2] & \
+ ~EFX_INPLACE_MASK32(64, 95, _field)) | \
+ EFX_INSERT_FIELD32(64, 95, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[3] = (((_oword).eo_u32[3] & \
+ ~EFX_INPLACE_MASK32(96, 127, _field)) | \
+ EFX_INSERT_FIELD32(96, 127, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD64(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] = (((_qword).eq_u64[0] & \
+ ~EFX_INPLACE_MASK64(0, 63, _field)) | \
+ EFX_INSERT_FIELD64(0, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_FIELD32(_qword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] = (((_qword).eq_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[1] = (((_qword).eq_u32[1] & \
+ ~EFX_INPLACE_MASK32(32, 63, _field)) | \
+ EFX_INSERT_FIELD32(32, 63, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_DWORD_FIELD(_dword, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_dword).ed_u32[0] = (((_dword).ed_u32[0] & \
+ ~EFX_INPLACE_MASK32(0, 31, _field)) | \
+ EFX_INSERT_FIELD32(0, 31, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_WORD_FIELD(_word, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_word).ew_u16[0] = (((_word).ew_u16[0] & \
+ ~EFX_INPLACE_MASK16(0, 15, _field)) | \
+ EFX_INSERT_FIELD16(0, 15, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_BYTE_FIELD(_byte, _field, _value) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_byte).eb_u8[0] = (((_byte).eb_u8[0] & \
+ ~EFX_INPLACE_MASK8(0, 7, _field)) | \
+ EFX_INSERT_FIELD8(0, 7, _field, _value)); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
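+/*
+ * Minimal usage sketch (SOME_FIELD is a hypothetical field name): after
+ * populating a dword, EFX_SET_DWORD_FIELD(dword, SOME_FIELD, 3) masks out
+ * SOME_FIELD's bits and inserts the new value, leaving the rest of the
+ * dword untouched.
+ */
+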
+/*
+ * Set or clear a numbered bit within an octword.
+ */
+
+#define EFX_SHIFT64(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 64) ? \
+ ((uint64_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT32(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 32) ? \
+ ((uint32_t)1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT16(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 16) ? \
+ (uint16_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
+#define EFX_SHIFT8(_bit, _base) \
+ (((_bit) >= (_base) && (_bit) < (_base) + 8) ? \
+ (uint8_t)(1 << ((_bit) - (_base))) : \
+ 0U)
+
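+/*
+ * For example, EFX_SHIFT32(70, 64) evaluates to ((uint32_t)1 << 6) while
+ * EFX_SHIFT32(70, 0) evaluates to 0, so only the 32-bit lane that
+ * contains the requested bit is actually modified.
+ */
+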
+#define EFX_SET_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT64(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ (_oword).eo_u64[1] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(64))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_OWORD_BIT32(_oword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_oword).eo_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_oword).eo_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ (_oword).eo_u32[2] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(64))); \
+ (_oword).eo_u32[3] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(96))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_OWORD_BIT64(_oword, _bit) \
+ (((_oword).eo_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u64[1] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(64)))))
+
+#define EFX_TEST_OWORD_BIT32(_oword, _bit) \
+ (((_oword).eo_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_oword).eo_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))) || \
+ ((_oword).eo_u32[2] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(64)))) || \
+ ((_oword).eo_u32[3] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(96)))))
+
+
+#define EFX_SET_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] |= \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_SET_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT64(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u64[0] &= \
+ __CPU_TO_LE_64(~EFX_SHIFT64(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_QWORD_BIT32(_qword, _bit) \
+ do { \
+ _NOTE(CONSTANTCONDITION) \
+ (_qword).eq_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ (_qword).eq_u32[1] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(32))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_QWORD_BIT64(_qword, _bit) \
+ (((_qword).eq_u64[0] & \
+ __CPU_TO_LE_64(EFX_SHIFT64(_bit, FIX_LINT(0)))) != 0)
+
+#define EFX_TEST_QWORD_BIT32(_qword, _bit) \
+ (((_qword).eq_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) || \
+ ((_qword).eq_u32[1] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(32)))))
+
+
+#define EFX_SET_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] |= \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_DWORD_BIT(_dword, _bit) \
+ do { \
+ (_dword).ed_u32[0] &= \
+ __CPU_TO_LE_32(~EFX_SHIFT32(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_DWORD_BIT(_dword, _bit) \
+ (((_dword).ed_u32[0] & \
+ __CPU_TO_LE_32(EFX_SHIFT32(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] |= \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_WORD_BIT(_word, _bit) \
+ do { \
+ (_word).ew_u16[0] &= \
+ __CPU_TO_LE_16(~EFX_SHIFT16(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_WORD_BIT(_word, _bit) \
+ (((_word).ew_u16[0] & \
+ __CPU_TO_LE_16(EFX_SHIFT16(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_SET_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] |= \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_CLEAR_BYTE_BIT(_byte, _bit) \
+ do { \
+ (_byte).eb_u8[0] &= \
+ __NATIVE_8(~EFX_SHIFT8(_bit, FIX_LINT(0))); \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_TEST_BYTE_BIT(_byte, _bit) \
+ (((_byte).eb_u8[0] & \
+ __NATIVE_8(EFX_SHIFT8(_bit, FIX_LINT(0)))) != 0)
+
+
+#define EFX_OR_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] |= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] |= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] |= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] |= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] |= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] |= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD64(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u64[0] &= (_oword2).eo_u64[0]; \
+ (_oword1).eo_u64[1] &= (_oword2).eo_u64[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_OWORD32(_oword1, _oword2) \
+ do { \
+ (_oword1).eo_u32[0] &= (_oword2).eo_u32[0]; \
+ (_oword1).eo_u32[1] &= (_oword2).eo_u32[1]; \
+ (_oword1).eo_u32[2] &= (_oword2).eo_u32[2]; \
+ (_oword1).eo_u32[3] &= (_oword2).eo_u32[3]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] |= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] |= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] |= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD64(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u64[0] &= (_qword2).eq_u64[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_QWORD32(_qword1, _qword2) \
+ do { \
+ (_qword1).eq_u32[0] &= (_qword2).eq_u32[0]; \
+ (_qword1).eq_u32[1] &= (_qword2).eq_u32[1]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] |= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_DWORD(_dword1, _dword2) \
+ do { \
+ (_dword1).ed_u32[0] &= (_dword2).ed_u32[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] |= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_WORD(_word1, _word2) \
+ do { \
+ (_word1).ew_u16[0] &= (_word2).ew_u16[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_OR_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] |= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#define EFX_AND_BYTE(_byte1, _byte2) \
+ do { \
+ (_byte1).eb_u8[0] &= (_byte2).eb_u8[0]; \
+ _NOTE(CONSTANTCONDITION) \
+ } while (B_FALSE)
+
+#if EFSYS_USE_UINT64
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL64
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL64
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET64
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT64
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT64
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT64
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT64
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT64
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT64
+#define EFX_OR_OWORD EFX_OR_OWORD64
+#define EFX_AND_OWORD EFX_AND_OWORD64
+#define EFX_OR_QWORD EFX_OR_QWORD64
+#define EFX_AND_QWORD EFX_AND_QWORD64
+#else
+#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_EQUAL EFX_OWORD_IS_EQUAL32
+#define EFX_QWORD_IS_EQUAL EFX_QWORD_IS_EQUAL32
+#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_SET EFX_OWORD_IS_SET32
+#define EFX_QWORD_IS_SET EFX_QWORD_IS_SET32
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#define EFX_SET_OWORD_BIT EFX_SET_OWORD_BIT32
+#define EFX_CLEAR_OWORD_BIT EFX_CLEAR_OWORD_BIT32
+#define EFX_TEST_OWORD_BIT EFX_TEST_OWORD_BIT32
+#define EFX_SET_QWORD_BIT EFX_SET_QWORD_BIT32
+#define EFX_CLEAR_QWORD_BIT EFX_CLEAR_QWORD_BIT32
+#define EFX_TEST_QWORD_BIT EFX_TEST_QWORD_BIT32
+#define EFX_OR_OWORD EFX_OR_OWORD32
+#define EFX_AND_OWORD EFX_AND_OWORD32
+#define EFX_OR_QWORD EFX_OR_QWORD32
+#define EFX_AND_QWORD EFX_AND_QWORD32
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_EFX_TYPES_H */
diff --git a/drivers/net/sfc/base/efx_vpd.c b/drivers/net/sfc/base/efx_vpd.c
new file mode 100644
index 00000000..1e47df2c
--- /dev/null
+++ b/drivers/net/sfc/base/efx_vpd.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#define TAG_TYPE_LBN 7
+#define TAG_TYPE_WIDTH 1
+#define TAG_TYPE_LARGE_ITEM_DECODE 1
+#define TAG_TYPE_SMALL_ITEM_DECODE 0
+
+#define TAG_SMALL_ITEM_NAME_LBN 3
+#define TAG_SMALL_ITEM_NAME_WIDTH 4
+#define TAG_SMALL_ITEM_SIZE_LBN 0
+#define TAG_SMALL_ITEM_SIZE_WIDTH 3
+
+#define TAG_LARGE_ITEM_NAME_LBN 0
+#define TAG_LARGE_ITEM_NAME_WIDTH 7
+
+#define TAG_NAME_END_DECODE 0x0f
+#define TAG_NAME_ID_STRING_DECODE 0x02
+#define TAG_NAME_VPD_R_DECODE 0x10
+#define TAG_NAME_VPD_W_DECODE 0x11
+
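+/*
+ * Resource tag header layout (as parsed below): a small item is a single
+ * header byte with bit 7 clear, the name in bits 6:3 and the payload
+ * length in bits 2:0; a large item has bit 7 set, the name in bits 6:0
+ * and a 16-bit little-endian payload length in the following two bytes.
+ * For example, 0x82 0x01 0x00 is a large ID-string tag with a one byte
+ * payload.
+ */
+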
+#if EFSYS_OPT_SIENA
+
+static const efx_vpd_ops_t __efx_vpd_siena_ops = {
+ siena_vpd_init, /* evpdo_init */
+ siena_vpd_size, /* evpdo_size */
+ siena_vpd_read, /* evpdo_read */
+ siena_vpd_verify, /* evpdo_verify */
+ siena_vpd_reinit, /* evpdo_reinit */
+ siena_vpd_get, /* evpdo_get */
+ siena_vpd_set, /* evpdo_set */
+ siena_vpd_next, /* evpdo_next */
+ siena_vpd_write, /* evpdo_write */
+ siena_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD
+
+static const efx_vpd_ops_t __efx_vpd_ef10_ops = {
+ ef10_vpd_init, /* evpdo_init */
+ ef10_vpd_size, /* evpdo_size */
+ ef10_vpd_read, /* evpdo_read */
+ ef10_vpd_verify, /* evpdo_verify */
+ ef10_vpd_reinit, /* evpdo_reinit */
+ ef10_vpd_get, /* evpdo_get */
+ ef10_vpd_set, /* evpdo_set */
+ ef10_vpd_next, /* evpdo_next */
+ ef10_vpd_write, /* evpdo_write */
+ ef10_vpd_fini, /* evpdo_fini */
+};
+
+#endif /* EFSYS_OPT_HUNTINGTON || EFSYS_OPT_MEDFORD */
+
+ __checkReturn efx_rc_t
+efx_vpd_init(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_VPD));
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ evpdop = &__efx_vpd_siena_ops;
+ break;
+#endif /* EFSYS_OPT_SIENA */
+
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_HUNTINGTON */
+
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ evpdop = &__efx_vpd_ef10_ops;
+ break;
+#endif /* EFSYS_OPT_MEDFORD */
+
+ default:
+ EFSYS_ASSERT(0);
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if (evpdop->evpdo_init != NULL) {
+ if ((rc = evpdop->evpdo_init(enp)) != 0)
+ goto fail2;
+ }
+
+ enp->en_evpdop = evpdop;
+ enp->en_mod_flags |= EFX_MOD_VPD;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_size(enp, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_read(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_verify(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_reinit == NULL) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = evpdop->evpdo_reinit(enp, data, size)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_get(enp, data, size, evvp)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_set(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_set(enp, data, size, evvp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_next(
+ __in efx_nic_t *enp,
+ __inout_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_next(enp, data, size, evvp, contp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if ((rc = evpdop->evpdo_write(enp, data, size)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_vpd_next_tag(
+ __in caddr_t data,
+ __in size_t size,
+ __inout unsigned int *offsetp,
+ __out efx_vpd_tag_t *tagp,
+ __out uint16_t *lengthp)
+{
+ efx_byte_t byte;
+ efx_word_t word;
+ uint8_t name;
+ uint16_t length;
+ size_t headlen;
+ efx_rc_t rc;
+
+ if (*offsetp >= size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ EFX_POPULATE_BYTE_1(byte, EFX_BYTE_0, data[*offsetp]);
+
+ switch (EFX_BYTE_FIELD(byte, TAG_TYPE)) {
+ case TAG_TYPE_SMALL_ITEM_DECODE:
+ headlen = 1;
+
+ name = EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_NAME);
+ length = (uint16_t)EFX_BYTE_FIELD(byte, TAG_SMALL_ITEM_SIZE);
+
+ break;
+
+ case TAG_TYPE_LARGE_ITEM_DECODE:
+ headlen = 3;
+
+ if (*offsetp + headlen > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ name = EFX_BYTE_FIELD(byte, TAG_LARGE_ITEM_NAME);
+ EFX_POPULATE_WORD_2(word,
+ EFX_BYTE_0, data[*offsetp + 1],
+ EFX_BYTE_1, data[*offsetp + 2]);
+ length = EFX_WORD_FIELD(word, EFX_WORD_0);
+
+ break;
+
+ default:
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if (*offsetp + headlen + length > size) {
+ rc = EFAULT;
+ goto fail3;
+ }
+
+ EFX_STATIC_ASSERT(TAG_NAME_END_DECODE == EFX_VPD_END);
+ EFX_STATIC_ASSERT(TAG_NAME_ID_STRING_DECODE == EFX_VPD_ID);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_R_DECODE == EFX_VPD_RO);
+ EFX_STATIC_ASSERT(TAG_NAME_VPD_W_DECODE == EFX_VPD_RW);
+ if (name != EFX_VPD_END && name != EFX_VPD_ID &&
+ name != EFX_VPD_RO) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ *tagp = name;
+ *lengthp = length;
+ *offsetp += headlen;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
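+/*
+ * Within a VPD-R/VPD-W tag each keyword entry is two ASCII characters,
+ * a one byte payload length and then the payload itself; for example
+ * 'S' 'N' 0x04 would be followed by a four byte serial number value.
+ */
+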
+static __checkReturn efx_rc_t
+efx_vpd_next_keyword(
+ __in_bcount(size) caddr_t tag,
+ __in size_t size,
+ __in unsigned int pos,
+ __out efx_vpd_keyword_t *keywordp,
+ __out uint8_t *lengthp)
+{
+ efx_vpd_keyword_t keyword;
+ uint8_t length;
+ efx_rc_t rc;
+
+ if (pos + 3U > size) {
+ rc = EFAULT;
+ goto fail1;
+ }
+
+ keyword = EFX_VPD_KEYWORD(tag[pos], tag[pos + 1]);
+ length = tag[pos + 2];
+
+ if (length == 0 || pos + 3U + length > size) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ *keywordp = keyword;
+ *lengthp = length;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_length(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out size_t *lengthp)
+{
+ efx_vpd_tag_t tag;
+ unsigned int offset;
+ uint16_t taglen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ offset += taglen;
+ if (tag == EFX_VPD_END)
+ break;
+ }
+
+ *lengthp = offset;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_verify(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out_opt boolean_t *cksummedp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ boolean_t cksummed = B_FALSE;
+ efx_rc_t rc;
+
+ /*
+ * Parse every tag/keyword pair in the existing VPD. If the checksum
+ * is present, assert that it is correct and that it is the final
+ * keyword in the RO block.
+ */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag == EFX_VPD_ID)
+ goto done;
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ /* RV keyword must be the last in the block */
+ if (cksummed) {
+ rc = EFAULT;
+ goto fail2;
+ }
+
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail3;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 4; i++)
+ cksum += data[i];
+
+ if (cksum != 0) {
+ rc = EFAULT;
+ goto fail4;
+ }
+
+ cksummed = B_TRUE;
+ }
+ }
+
+ done:
+ offset += taglen;
+ }
+
+ if (!cksummed) {
+ rc = EFAULT;
+ goto fail5;
+ }
+
+ if (cksummedp != NULL)
+ *cksummedp = cksummed;
+
+ return (0);
+
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static uint8_t __efx_vpd_blank_pid[] = {
+ /* Large resource type ID length 1 */
+ 0x82, 0x01, 0x00,
+ /* Product name ' ' */
+ 0x32,
+};
+
+static uint8_t __efx_vpd_blank_r[] = {
+ /* Large resource type VPD-R length 4 */
+ 0x90, 0x04, 0x00,
+ /* RV keyword length 1 */
+ 'R', 'V', 0x01,
+ /* RV payload checksum */
+ 0x00,
+};
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_reinit(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in boolean_t wantpid)
+{
+ unsigned int offset = 0;
+ unsigned int pos;
+ efx_byte_t byte;
+ uint8_t cksum;
+ efx_rc_t rc;
+
+ if (size < 0x100) {
+ rc = ENOSPC;
+ goto fail1;
+ }
+
+ if (wantpid) {
+ memcpy(data + offset, __efx_vpd_blank_pid,
+ sizeof (__efx_vpd_blank_pid));
+ offset += sizeof (__efx_vpd_blank_pid);
+ }
+
+ memcpy(data + offset, __efx_vpd_blank_r, sizeof (__efx_vpd_blank_r));
+ offset += sizeof (__efx_vpd_blank_r);
+
+ /* Update checksum */
+ cksum = 0;
+ for (pos = 0; pos < offset; pos++)
+ cksum += data[pos];
+ data[offset - 1] -= cksum;
+
+ /* Append trailing tag */
+ EFX_POPULATE_BYTE_3(byte,
+ TAG_TYPE, TAG_TYPE_SMALL_ITEM_DECODE,
+ TAG_SMALL_ITEM_NAME, TAG_NAME_END_DECODE,
+ TAG_SMALL_ITEM_SIZE, 0);
+ data[offset] = EFX_BYTE_FIELD(byte, EFX_BYTE_0);
+ offset++;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_next(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_tag_t *tagp,
+ __out efx_vpd_keyword_t *keywordp,
+ __out_opt unsigned int *payloadp,
+ __out_opt uint8_t *paylenp,
+ __inout unsigned int *contp)
+{
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword = 0;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int index;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t paylen;
+ efx_rc_t rc;
+
+ offset = index = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail1;
+
+ if (tag == EFX_VPD_END) {
+ keyword = 0;
+ paylen = 0;
+ index = 0;
+ break;
+ }
+
+ if (tag == EFX_VPD_ID) {
+ if (index++ == *contp) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+ keyword = 0;
+ paylen = (uint8_t)MIN(taglen, 0xff);
+
+ goto done;
+ }
+ } else {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail2;
+
+ if (index++ == *contp) {
+ offset += pos + 3;
+ paylen = keylen;
+
+ goto done;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+done:
+ *tagp = tag;
+ *keywordp = keyword;
+ if (payloadp != NULL)
+ *payloadp = offset;
+ if (paylenp != NULL)
+ *paylenp = paylen;
+
+ *contp = index;
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_get(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_tag_t tag,
+ __in efx_vpd_keyword_t keyword,
+ __out unsigned int *payloadp,
+ __out uint8_t *paylenp)
+{
+ efx_vpd_tag_t itag;
+ efx_vpd_keyword_t ikeyword;
+ unsigned int offset;
+ unsigned int pos;
+ uint16_t taglen;
+ uint8_t keylen;
+ efx_rc_t rc;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &itag, &taglen)) != 0)
+ goto fail1;
+ if (itag == EFX_VPD_END)
+ break;
+
+ if (itag == tag) {
+ if (itag == EFX_VPD_ID) {
+ EFSYS_ASSERT3U(taglen, <, 0x100);
+
+ *paylenp = (uint8_t)MIN(taglen, 0xff);
+ *payloadp = offset;
+ return (0);
+ }
+
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &ikeyword, &keylen)) != 0)
+ goto fail2;
+
+ if (ikeyword == keyword) {
+ *paylenp = keylen;
+ *payloadp = offset + pos + 3;
+ return (0);
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Not an error */
+ return (ENOENT);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+efx_vpd_hunk_set(
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_word_t word;
+ efx_vpd_tag_t tag;
+ efx_vpd_keyword_t keyword;
+ unsigned int offset;
+ unsigned int pos;
+ unsigned int taghead;
+ unsigned int source;
+ unsigned int dest;
+ unsigned int i;
+ uint16_t taglen;
+ uint8_t keylen;
+ uint8_t cksum;
+ size_t used;
+ efx_rc_t rc;
+
+ switch (evvp->evv_tag) {
+ case EFX_VPD_ID:
+ if (evvp->evv_keyword != 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Can't delete the ID keyword */
+ if (evvp->evv_length == 0) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ case EFX_VPD_RO:
+ if (evvp->evv_keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ rc = EINVAL;
+ goto fail1;
+ }
+ break;
+
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Determine total size of all current tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &used)) != 0)
+ goto fail2;
+
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ taghead = offset;
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail3;
+ if (tag == EFX_VPD_END)
+ break;
+ else if (tag != evvp->evv_tag) {
+ offset += taglen;
+ continue;
+ }
+
+ /* We only support modifying large resource tags */
+ if (offset - taghead != 3) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /*
+ * Work out the offset of the byte immediately after the
+ * old (=source) and new (=dest) keyword/tag
+ */
+ pos = 0;
+ if (tag == EFX_VPD_ID) {
+ source = offset + taglen;
+ dest = offset + evvp->evv_length;
+ goto check_space;
+ }
+
+ EFSYS_ASSERT3U(tag, ==, EFX_VPD_RO);
+ source = dest = 0;
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail5;
+
+ if (keyword == evvp->evv_keyword &&
+ evvp->evv_length == 0) {
+ /* Deleting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos;
+ break;
+
+ } else if (keyword == evvp->evv_keyword) {
+ /* Adjusting this keyword */
+ source = offset + pos + 3 + keylen;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+
+ } else if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ /* The RV keyword must be at the end */
+ EFSYS_ASSERT3U(pos + 3 + keylen, ==, taglen);
+
+ /*
+ * The keyword doesn't already exist. If the
+ * user is deleting a non-existent keyword then
+ * this is a no-op.
+ */
+ if (evvp->evv_length == 0)
+ return (0);
+
+ /* Insert this keyword before the RV keyword */
+ source = offset + pos;
+ dest = offset + pos + 3 + evvp->evv_length;
+ break;
+ }
+ }
+
+ check_space:
+ if (used + dest > size + source) {
+ rc = ENOSPC;
+ goto fail6;
+ }
+
+ /* Move trailing data */
+ (void) memmove(data + dest, data + source, used - source);
+
+ /* Copy contents */
+ memcpy(data + dest - evvp->evv_length, evvp->evv_value,
+ evvp->evv_length);
+
+ /* Insert new keyword header if required */
+ if (tag != EFX_VPD_ID && evvp->evv_length > 0) {
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0,
+ evvp->evv_keyword);
+ data[offset + pos + 0] =
+ EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset + pos + 1] =
+ EFX_WORD_FIELD(word, EFX_BYTE_1);
+ data[offset + pos + 2] = evvp->evv_length;
+ }
+
+ /* Modify tag length (large resource type) */
+ taglen += (dest - source);
+ EFX_POPULATE_WORD_1(word, EFX_WORD_0, taglen);
+ data[offset - 2] = EFX_WORD_FIELD(word, EFX_BYTE_0);
+ data[offset - 1] = EFX_WORD_FIELD(word, EFX_BYTE_1);
+
+ goto checksum;
+ }
+
+ /* Unable to find the matching tag */
+ rc = ENOENT;
+ goto fail7;
+
+checksum:
+ /* Find the RV tag, and update the checksum */
+ offset = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_next_tag(data, size, &offset,
+ &tag, &taglen)) != 0)
+ goto fail8;
+ if (tag == EFX_VPD_END)
+ break;
+ if (tag == EFX_VPD_RO) {
+ for (pos = 0; pos != taglen; pos += 3 + keylen) {
+ if ((rc = efx_vpd_next_keyword(data + offset,
+ taglen, pos, &keyword, &keylen)) != 0)
+ goto fail9;
+
+ if (keyword == EFX_VPD_KEYWORD('R', 'V')) {
+ cksum = 0;
+ for (i = 0; i < offset + pos + 3; i++)
+ cksum += data[i];
+ data[i] = -cksum;
+ break;
+ }
+ }
+ }
+
+ offset += taglen;
+ }
+
+ /* Fill the unused portion of the block with 0xff */
+ (void) memset(data + offset + taglen, 0xff, size - offset - taglen);
+
+ return (0);
+
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+efx_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ const efx_vpd_ops_t *evpdop = enp->en_evpdop;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
+ EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_VPD);
+
+ if (evpdop->evpdo_fini != NULL)
+ evpdop->evpdo_fini(enp);
+
+ enp->en_evpdop = NULL;
+ enp->en_mod_flags &= ~EFX_MOD_VPD;
+}
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/base/hunt_impl.h b/drivers/net/sfc/base/hunt_impl.h
new file mode 100644
index 00000000..0e0c870f
--- /dev/null
+++ b/drivers/net/sfc/base/hunt_impl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_HUNT_IMPL_H
+#define _SYS_HUNT_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+#include "efx_mcdi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Missing register definitions */
+#ifndef ER_DZ_TX_PIOBUF_OFST
+#define ER_DZ_TX_PIOBUF_OFST 0x00001000
+#endif
+#ifndef ER_DZ_TX_PIOBUF_STEP
+#define ER_DZ_TX_PIOBUF_STEP 8192
+#endif
+#ifndef ER_DZ_TX_PIOBUF_ROWS
+#define ER_DZ_TX_PIOBUF_ROWS 2048
+#endif
+
+#ifndef ER_DZ_TX_PIOBUF_SIZE
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+#endif
+
+#define HUNT_PIOBUF_NBUFS (16)
+#define HUNT_PIOBUF_SIZE (ER_DZ_TX_PIOBUF_SIZE)
+
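+/*
+ * e.g. with an ER_DZ_TX_PIOBUF_SIZE of 2048 this gives a 64 byte
+ * minimum PIO allocation.
+ */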
+#define HUNT_MIN_PIO_ALLOC_SIZE (HUNT_PIOBUF_SIZE / 32)
+
+
+/* NIC */
+
+extern __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_HUNT_IMPL_H */
diff --git a/drivers/net/sfc/base/hunt_nic.c b/drivers/net/sfc/base/hunt_nic.c
new file mode 100644
index 00000000..addbf1c5
--- /dev/null
+++ b/drivers/net/sfc/base/hunt_nic.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#if EFSYS_OPT_MON_MCDI
+#include "mcdi_mon.h"
+#endif
+
+#if EFSYS_OPT_HUNTINGTON
+
+#include "ef10_tlv_layout.h"
+
+static __checkReturn efx_rc_t
+hunt_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t max_port_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * On Huntington, the firmware may not give us the current port mode, so
+ * we need to go by the set of available port modes and assume the most
+ * capable mode is in use.
+ */
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, NULL)) != 0) {
+ /* No port mode info available */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if (port_modes & (1 << TLV_PORT_MODE_40G_40G)) {
+ /*
+ * This needs the full PCIe bandwidth (and could use
+ * more) - roughly 64 Gbit/s for 8 lanes of Gen3.
+ */
+ if ((rc = efx_nic_calculate_pcie_link_bandwidth(8,
+ EFX_PCIE_LINK_SPEED_GEN3, &bandwidth)) != 0)
+ goto fail1;
+ } else {
+ if (port_modes & (1 << TLV_PORT_MODE_40G)) {
+ max_port_mode = TLV_PORT_MODE_40G;
+ } else if (port_modes & (1 << TLV_PORT_MODE_10G_10G_10G_10G)) {
+ max_port_mode = TLV_PORT_MODE_10G_10G_10G_10G;
+ } else {
+ /* Assume two 10G ports */
+ max_port_mode = TLV_PORT_MODE_10G_10G;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(max_port_mode,
+ &bandwidth)) != 0)
+ goto fail2;
+ }
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+hunt_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t flags;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t base, nvec;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Huntington */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+ /*
+ * If the bug35388 workaround is enabled, then use an indirect access
+ * method to avoid unsafe EVQ writes.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG35388, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug35388_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug35388_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /*
+ * If the bug41750 workaround is enabled, then do not test interrupts,
+ * as the test will fail (seen with Greenport controllers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG41750, B_TRUE,
+ NULL);
+ if (rc == 0) {
+ encp->enc_bug41750_workaround = B_TRUE;
+ } else if (rc == EACCES) {
+ /* Assume a controller with 40G ports needs the workaround. */
+ if (epp->ep_default_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ encp->enc_bug41750_workaround = B_TRUE;
+ else
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug41750_workaround = B_FALSE;
+ } else {
+ goto fail9;
+ }
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /* Interrupt testing does not work for VFs. See bug50084. */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /*
+ * If the bug26807 workaround is enabled, then firmware has enabled
+ * support for chained multicast filters. Firmware will reset (FLR)
+ * functions which have filters in the hardware filter table when the
+ * workaround is enabled/disabled.
+ *
+ * We must recheck if the workaround is enabled after inserting the
+ * first hardware filter, in case it has been changed since this check.
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807,
+ B_TRUE, &flags);
+ if (rc == 0) {
+ encp->enc_bug26807_workaround = B_TRUE;
+ if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) {
+ /*
+ * Other functions had installed filters before the
+ * workaround was enabled, and they have been reset
+ * by firmware.
+ */
+ EFSYS_PROBE(bug26807_workaround_flr_done);
+ /* FIXME: bump MC warm boot count ? */
+ }
+ } else if (rc == EACCES) {
+ /*
+ * Unprivileged functions cannot enable the workaround in older
+ * firmware.
+ */
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else if ((rc == ENOTSUP) || (rc == ENOENT)) {
+ encp->enc_bug26807_workaround = B_FALSE;
+ } else {
+ goto fail10;
+ }
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail11;
+
+ /*
+ * The Huntington timer quantum is 1536 sysclk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
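+ /*
+ * Worked example (illustrative only): a 200 MHz sysclk gives a
+ * quantum of 1536000 / 200 = 7680 ns.
+ */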
+ encp->enc_evq_timer_quantum_ns = 1536000UL / sysclk; /* 1536 cycles */
+ if (encp->enc_bug35388_workaround) {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ ERF_DD_EVQ_IND_TIMER_VAL_WIDTH) / 1000;
+ } else {
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+ }
+
+ encp->enc_bug61265_workaround = B_FALSE; /* Medford only */
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail12;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 64; /* RX DMA end padding */
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ /*
+ * The workaround for bug35388 uses the top bit of transmit queue
+ * descriptor writes, preventing the use of 4096 descriptor TXQs.
+ */
+ encp->enc_txq_max_ndescs = encp->enc_bug35388_workaround ? 2048 : 4096;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = HUNT_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = HUNT_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = HUNT_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail13;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail14;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Maximum number of bytes into the frame the TCP header can start for
+ * firmware assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ if ((rc = hunt_nic_get_required_pcie_bandwidth(enp, &bandwidth)) != 0)
+ goto fail15;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+
+ /* All Huntington devices have a PCIe Gen3, 8 lane connector */
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail15:
+ EFSYS_PROBE(fail15);
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+#endif /* EFSYS_OPT_HUNTINGTON */
diff --git a/drivers/net/sfc/base/mcdi_mon.c b/drivers/net/sfc/base/mcdi_mon.c
new file mode 100644
index 00000000..c5360c31
--- /dev/null
+++ b/drivers/net/sfc/base/mcdi_mon.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+#define MCDI_MON_NEXT_PAGE ((uint16_t)0xfffe)
+#define MCDI_MON_INVALID_SENSOR ((uint16_t)0xfffd)
+#define MCDI_MON_PAGE_SIZE 0x20
+
+/* Bitmasks of valid port(s) for each sensor */
+#define MCDI_MON_PORT_NONE (0x00)
+#define MCDI_MON_PORT_P1 (0x01)
+#define MCDI_MON_PORT_P2 (0x02)
+#define MCDI_MON_PORT_P3 (0x04)
+#define MCDI_MON_PORT_P4 (0x08)
+#define MCDI_MON_PORT_Px (0xFFFF)
+
+/* Get port mask from one-based MCDI port number */
+#define MCDI_MON_PORT_MASK(_emip) (1U << ((_emip)->emi_port - 1))
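+/*
+ * e.g. a one-based MCDI port of 1 yields mask 0x01 (P1) and port 2
+ * yields mask 0x02 (P2).
+ */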
+
+/* Entry for MCDI sensor in sensor map */
+#define STAT(portmask, stat) \
+ { (MCDI_MON_PORT_##portmask), (EFX_MON_STAT_##stat) }
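+/*
+ * e.g. STAT(Px, INT_TEMP) expands to
+ * { (MCDI_MON_PORT_Px), (EFX_MON_STAT_INT_TEMP) }
+ */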
+
+/* Entry for sensor next page flag in sensor map */
+#define STAT_NEXT_PAGE() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_NEXT_PAGE }
+
+/* Placeholder for gaps in the array */
+#define STAT_NO_SENSOR() \
+ { MCDI_MON_PORT_NONE, MCDI_MON_INVALID_SENSOR }
+
+/* Map from MC sensors to monitor statistics */
+static const struct mcdi_sensor_map_s {
+ uint16_t msm_port_mask;
+ uint16_t msm_stat;
+} mcdi_sensor_map[] = {
+ /* Sensor page 0 MC_CMD_SENSOR_xxx */
+ STAT(Px, INT_TEMP), /* 0x00 CONTROLLER_TEMP */
+ STAT(Px, EXT_TEMP), /* 0x01 PHY_COMMON_TEMP */
+ STAT(Px, INT_COOLING), /* 0x02 CONTROLLER_COOLING */
+ STAT(P1, EXT_TEMP), /* 0x03 PHY0_TEMP */
+ STAT(P1, EXT_COOLING), /* 0x04 PHY0_COOLING */
+ STAT(P2, EXT_TEMP), /* 0x05 PHY1_TEMP */
+ STAT(P2, EXT_COOLING), /* 0x06 PHY1_COOLING */
+ STAT(Px, 1V), /* 0x07 IN_1V0 */
+ STAT(Px, 1_2V), /* 0x08 IN_1V2 */
+ STAT(Px, 1_8V), /* 0x09 IN_1V8 */
+ STAT(Px, 2_5V), /* 0x0a IN_2V5 */
+ STAT(Px, 3_3V), /* 0x0b IN_3V3 */
+ STAT(Px, 12V), /* 0x0c IN_12V0 */
+ STAT(Px, 1_2VA), /* 0x0d IN_1V2A */
+ STAT(Px, VREF), /* 0x0e IN_VREF */
+ STAT(Px, VAOE), /* 0x0f OUT_VAOE */
+ STAT(Px, AOE_TEMP), /* 0x10 AOE_TEMP */
+ STAT(Px, PSU_AOE_TEMP), /* 0x11 PSU_AOE_TEMP */
+ STAT(Px, PSU_TEMP), /* 0x12 PSU_TEMP */
+ STAT(Px, FAN0), /* 0x13 FAN_0 */
+ STAT(Px, FAN1), /* 0x14 FAN_1 */
+ STAT(Px, FAN2), /* 0x15 FAN_2 */
+ STAT(Px, FAN3), /* 0x16 FAN_3 */
+ STAT(Px, FAN4), /* 0x17 FAN_4 */
+ STAT(Px, VAOE_IN), /* 0x18 IN_VAOE */
+ STAT(Px, IAOE), /* 0x19 OUT_IAOE */
+ STAT(Px, IAOE_IN), /* 0x1a IN_IAOE */
+ STAT(Px, NIC_POWER), /* 0x1b NIC_POWER */
+ STAT(Px, 0_9V), /* 0x1c IN_0V9 */
+ STAT(Px, I0_9V), /* 0x1d IN_I0V9 */
+ STAT(Px, I1_2V), /* 0x1e IN_I1V2 */
+ STAT_NEXT_PAGE(), /* 0x1f Next page flag (not a sensor) */
+
+ /* Sensor page 1 MC_CMD_SENSOR_xxx */
+ STAT(Px, 0_9V_ADC), /* 0x20 IN_0V9_ADC */
+ STAT(Px, INT_TEMP2), /* 0x21 CONTROLLER_2_TEMP */
+ STAT(Px, VREG_TEMP), /* 0x22 VREG_INTERNAL_TEMP */
+ STAT(Px, VREG_0_9V_TEMP), /* 0x23 VREG_0V9_TEMP */
+ STAT(Px, VREG_1_2V_TEMP), /* 0x24 VREG_1V2_TEMP */
+ STAT(Px, INT_VPTAT), /* 0x25 CTRLR. VPTAT */
+ STAT(Px, INT_ADC_TEMP), /* 0x26 CTRLR. INTERNAL_TEMP */
+ STAT(Px, EXT_VPTAT), /* 0x27 CTRLR. VPTAT_EXTADC */
+ STAT(Px, EXT_ADC_TEMP), /* 0x28 CTRLR. INTERNAL_TEMP_EXTADC */
+ STAT(Px, AMBIENT_TEMP), /* 0x29 AMBIENT_TEMP */
+ STAT(Px, AIRFLOW), /* 0x2a AIRFLOW */
+ STAT(Px, VDD08D_VSS08D_CSR), /* 0x2b VDD08D_VSS08D_CSR */
+ STAT(Px, VDD08D_VSS08D_CSR_EXTADC), /* 0x2c VDD08D_VSS08D_CSR_EXTADC */
+ STAT(Px, HOTPOINT_TEMP), /* 0x2d HOTPOINT_TEMP */
+ STAT(P1, PHY_POWER_SWITCH_PORT0), /* 0x2e PHY_POWER_SWITCH_PORT0 */
+ STAT(P2, PHY_POWER_SWITCH_PORT1), /* 0x2f PHY_POWER_SWITCH_PORT1 */
+ STAT(Px, MUM_VCC), /* 0x30 MUM_VCC */
+ STAT(Px, 0V9_A), /* 0x31 0V9_A */
+ STAT(Px, I0V9_A), /* 0x32 I0V9_A */
+ STAT(Px, 0V9_A_TEMP), /* 0x33 0V9_A_TEMP */
+ STAT(Px, 0V9_B), /* 0x34 0V9_B */
+ STAT(Px, I0V9_B), /* 0x35 I0V9_B */
+ STAT(Px, 0V9_B_TEMP), /* 0x36 0V9_B_TEMP */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY), /* 0x37 CCOM_AVREG_1V2_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V2_SUPPLY_EXT_ADC),
+ /* 0x38 CCOM_AVREG_1V2_SUPPLY_EXT_ADC */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY), /* 0x39 CCOM_AVREG_1V8_SUPPLY */
+ STAT(Px, CCOM_AVREG_1V8_SUPPLY_EXT_ADC),
+ /* 0x3a CCOM_AVREG_1V8_SUPPLY_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x3b (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3c (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3d (no sensor) */
+ STAT_NO_SENSOR(), /* 0x3e (no sensor) */
+ STAT_NEXT_PAGE(), /* 0x3f Next page flag (not a sensor) */
+
+ /* Sensor page 2 MC_CMD_SENSOR_xxx */
+ STAT(Px, CONTROLLER_MASTER_VPTAT), /* 0x40 MASTER_VPTAT */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP), /* 0x41 MASTER_INT_TEMP */
+ STAT(Px, CONTROLLER_MASTER_VPTAT_EXT_ADC), /* 0x42 MAST_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_MASTER_INTERNAL_TEMP_EXT_ADC),
+ /* 0x43 MASTER_INTERNAL_TEMP_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT), /* 0x44 SLAVE_VPTAT */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP), /* 0x45 SLAVE_INTERNAL_TEMP */
+ STAT(Px, CONTROLLER_SLAVE_VPTAT_EXT_ADC), /* 0x46 SLAVE_VPTAT_EXT_ADC */
+ STAT(Px, CONTROLLER_SLAVE_INTERNAL_TEMP_EXT_ADC),
+ /* 0x47 SLAVE_INTERNAL_TEMP_EXT_ADC */
+ STAT_NO_SENSOR(), /* 0x48 (no sensor) */
+ STAT(Px, SODIMM_VOUT), /* 0x49 SODIMM_VOUT */
+ STAT(Px, SODIMM_0_TEMP), /* 0x4a SODIMM_0_TEMP */
+ STAT(Px, SODIMM_1_TEMP), /* 0x4b SODIMM_1_TEMP */
+ STAT(Px, PHY0_VCC), /* 0x4c PHY0_VCC */
+ STAT(Px, PHY1_VCC), /* 0x4d PHY1_VCC */
+ STAT(Px, CONTROLLER_TDIODE_TEMP), /* 0x4e CONTROLLER_TDIODE_TEMP */
+ STAT(Px, BOARD_FRONT_TEMP), /* 0x4f BOARD_FRONT_TEMP */
+ STAT(Px, BOARD_BACK_TEMP), /* 0x50 BOARD_BACK_TEMP */
+};
+
+#define MCDI_STATIC_SENSOR_ASSERT(_field) \
+ EFX_STATIC_ASSERT(MC_CMD_SENSOR_STATE_ ## _field \
+ == EFX_MON_STAT_STATE_ ## _field)
+
+static void
+mcdi_mon_decode_stats(
+ __in efx_nic_t *enp,
+ __in_bcount(sensor_mask_size) uint32_t *sensor_mask,
+ __in size_t sensor_mask_size,
+ __in_opt efsys_mem_t *esmp,
+ __out_bcount_opt(sensor_mask_size) uint32_t *stat_maskp,
+ __inout_ecount_opt(EFX_MON_NSTATS) efx_mon_stat_value_t *stat)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint16_t port_mask;
+ uint16_t sensor;
+ size_t sensor_max;
+ uint32_t stat_mask[(EFX_ARRAY_SIZE(mcdi_sensor_map) + 31) / 32];
+ uint32_t idx = 0;
+ uint32_t page = 0;
+
+ /* Assert the MC_CMD_SENSOR and EFX_MON_STATE namespaces agree */
+ MCDI_STATIC_SENSOR_ASSERT(OK);
+ MCDI_STATIC_SENSOR_ASSERT(WARNING);
+ MCDI_STATIC_SENSOR_ASSERT(FATAL);
+ MCDI_STATIC_SENSOR_ASSERT(BROKEN);
+ MCDI_STATIC_SENSOR_ASSERT(NO_READING);
+
+ EFX_STATIC_ASSERT(sizeof (stat_mask[0]) * 8 ==
+ EFX_MON_MASK_ELEMENT_SIZE);
+ sensor_max =
+ MIN((8 * sensor_mask_size), EFX_ARRAY_SIZE(mcdi_sensor_map));
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ memset(stat_mask, 0, sizeof (stat_mask));
+
+ /*
+ * The MCDI sensor readings in the DMA buffer are a packed array of
+ * MC_CMD_SENSOR_VALUE_ENTRY structures, which only includes entries for
+ * supported sensors (bit set in sensor_mask). The sensor_mask and
+ * sensor readings do not include entries for the per-page NEXT_PAGE
+ * flag.
+ *
+ * sensor_mask may legitimately contain MCDI sensors that the driver
+ * does not understand.
+ */
+ for (sensor = 0; sensor < sensor_max; ++sensor) {
+ efx_mon_stat_t id = mcdi_sensor_map[sensor].msm_stat;
+
+ if ((sensor % MCDI_MON_PAGE_SIZE) == MC_CMD_SENSOR_PAGE0_NEXT) {
+ EFSYS_ASSERT3U(id, ==, MCDI_MON_NEXT_PAGE);
+ page++;
+ continue;
+ }
+ if (~(sensor_mask[page]) & (1U << (sensor % MCDI_MON_PAGE_SIZE)))
+ continue;
+ idx++;
+
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ continue;
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ /*
+ * stat_mask is a bitmask indexed by EFX_MON_* monitor statistic
+ * identifiers from efx_mon_stat_t (without NEXT_PAGE bits).
+ *
+ * If there is an entry in the MCDI sensor to monitor statistic
+ * map then the sensor reading is used for the value of the
+ * monitor statistic.
+ */
+ stat_mask[id / EFX_MON_MASK_ELEMENT_SIZE] |=
+ (1U << (id % EFX_MON_MASK_ELEMENT_SIZE));
+
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+
+ /* Get MCDI sensor reading from DMA buffer */
+ EFSYS_MEM_READD(esmp, 4 * (idx - 1), &dword);
+
+ /* Update EFX monitor stat from MCDI sensor reading */
+ stat[id].emsv_value = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+ stat[id].emsv_state = (uint16_t)EFX_DWORD_FIELD(dword,
+ MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ }
+ }
+
+ if (stat_maskp != NULL) {
+ memcpy(stat_maskp, stat_mask, sizeof (stat_mask));
+ }
+}
+
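+/*
+ * Illustrative sketch (example statistic chosen arbitrarily): sensor 0x00
+ * (CONTROLLER_TEMP) maps to EFX_MON_STAT_INT_TEMP above, so a caller that
+ * supplied stat_maskp and stat could test for and read that statistic
+ * roughly as follows:
+ *
+ *	if (stat_maskp[EFX_MON_STAT_INT_TEMP / EFX_MON_MASK_ELEMENT_SIZE] &
+ *	    (1U << (EFX_MON_STAT_INT_TEMP % EFX_MON_MASK_ELEMENT_SIZE))) {
+ *		uint16_t value = stat[EFX_MON_STAT_INT_TEMP].emsv_value;
+ *		uint16_t state = stat[EFX_MON_STAT_INT_TEMP].emsv_state;
+ *	}
+ */
+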
+ __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint16_t port_mask;
+ uint16_t sensor;
+ uint16_t state;
+ uint16_t value;
+ efx_mon_stat_t id;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(emip->emi_port > 0); /* MCDI port number is one-based */
+ port_mask = MCDI_MON_PORT_MASK(emip);
+
+ sensor = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_MONITOR);
+ state = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_STATE);
+ value = (uint16_t)MCDI_EV_FIELD(eqp, SENSOREVT_VALUE);
+
+ /* Hardware must support this MCDI sensor */
+ EFSYS_ASSERT3U(sensor, <, (8 * encp->enc_mcdi_sensor_mask_size));
+ EFSYS_ASSERT((sensor % MCDI_MON_PAGE_SIZE) != MC_CMD_SENSOR_PAGE0_NEXT);
+ EFSYS_ASSERT(encp->enc_mcdi_sensor_maskp != NULL);
+ EFSYS_ASSERT((encp->enc_mcdi_sensor_maskp[sensor / MCDI_MON_PAGE_SIZE] &
+ (1U << (sensor % MCDI_MON_PAGE_SIZE))) != 0);
+
+ /* But we don't have to understand it */
+ if (sensor >= EFX_ARRAY_SIZE(mcdi_sensor_map)) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+ id = mcdi_sensor_map[sensor].msm_stat;
+ if ((port_mask & mcdi_sensor_map[sensor].msm_port_mask) == 0)
+ return (ENODEV);
+ EFSYS_ASSERT(id < EFX_MON_NSTATS);
+
+ *idp = id;
+ valuep->emsv_value = value;
+ valuep->emsv_state = state;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+
+static __checkReturn efx_rc_t
+efx_mcdi_read_sensors(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __in uint32_t size)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_READ_SENSORS_EXT_IN_LEN,
+ MC_CMD_READ_SENSORS_EXT_OUT_LEN)];
+ uint32_t addr_lo, addr_hi;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_READ_SENSORS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_READ_SENSORS_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_READ_SENSORS_EXT_OUT_LEN;
+
+ addr_lo = (uint32_t)(EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ addr_hi = (uint32_t)(EFSYS_MEM_ADDR(esmp) >> 32);
+
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_LO, addr_lo);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_DMA_ADDR_HI, addr_hi);
+ MCDI_IN_SET_DWORD(req, READ_SENSORS_EXT_IN_LENGTH, size);
+
+ efx_mcdi_execute(enp, &req);
+
+ return (req.emr_rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info_npages(
+ __in efx_nic_t *enp,
+ __out uint32_t *npagesp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ int page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(npagesp != NULL);
+
+ page = 0;
+ do {
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page++);
+
+ efx_mcdi_execute_quiet(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ } while (MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK) &
+ (1U << MC_CMD_SENSOR_PAGE0_NEXT));
+
+ *npagesp = page;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+efx_mcdi_sensor_info(
+ __in efx_nic_t *enp,
+ __out_ecount(npages) uint32_t *sensor_maskp,
+ __in size_t npages)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_SENSOR_INFO_EXT_IN_LEN,
+ MC_CMD_SENSOR_INFO_OUT_LENMAX)];
+ uint32_t page;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(sensor_maskp != NULL);
+
+ for (page = 0; page < npages; page++) {
+ uint32_t mask;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SENSOR_INFO;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SENSOR_INFO_EXT_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SENSOR_INFO_OUT_LENMAX;
+
+ MCDI_IN_SET_DWORD(req, SENSOR_INFO_EXT_IN_PAGE, page);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ mask = MCDI_OUT_DWORD(req, SENSOR_INFO_OUT_MASK);
+
+ if ((page != (npages - 1)) &&
+ ((mask & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) == 0)) {
+ rc = EINVAL;
+ goto fail2;
+ }
+ sensor_maskp[page] = mask;
+ }
+
+ if (sensor_maskp[npages - 1] & (1U << MC_CMD_SENSOR_PAGE0_NEXT)) {
+ rc = EINVAL;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t size = encp->enc_mon_stat_dma_buf_size;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_read_sensors(enp, esmp, size)) != 0)
+ goto fail1;
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, size);
+
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ esmp, NULL, values);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t npages;
+ efx_rc_t rc;
+
+ switch (enp->en_family) {
+#if EFSYS_OPT_SIENA
+ case EFX_FAMILY_SIENA:
+ encp->enc_mon_type = EFX_MON_SFC90X0;
+ break;
+#endif
+#if EFSYS_OPT_HUNTINGTON
+ case EFX_FAMILY_HUNTINGTON:
+ encp->enc_mon_type = EFX_MON_SFC91X0;
+ break;
+#endif
+#if EFSYS_OPT_MEDFORD
+ case EFX_FAMILY_MEDFORD:
+ encp->enc_mon_type = EFX_MON_SFC92X0;
+ break;
+#endif
+ default:
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Get mc sensor mask size */
+ npages = 0;
+ if ((rc = efx_mcdi_sensor_info_npages(enp, &npages)) != 0)
+ goto fail2;
+
+ encp->enc_mon_stat_dma_buf_size = npages * EFX_MON_STATS_PAGE_SIZE;
+ encp->enc_mcdi_sensor_mask_size = npages * sizeof (uint32_t);
+
+ /* Allocate mc sensor mask */
+ EFSYS_KMEM_ALLOC(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+ if (encp->enc_mcdi_sensor_maskp == NULL) {
+ rc = ENOMEM;
+ goto fail3;
+ }
+
+ /* Read mc sensor mask */
+ if ((rc = efx_mcdi_sensor_info(enp,
+ encp->enc_mcdi_sensor_maskp,
+ npages)) != 0)
+ goto fail4;
+
+ /* Build monitor statistics mask */
+ mcdi_mon_decode_stats(enp,
+ encp->enc_mcdi_sensor_maskp,
+ encp->enc_mcdi_sensor_mask_size,
+ NULL, encp->enc_mon_stat_mask, NULL);
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+
+fail3:
+ EFSYS_PROBE(fail3);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+
+ if (encp->enc_mcdi_sensor_maskp != NULL) {
+ EFSYS_KMEM_FREE(enp->en_esip,
+ encp->enc_mcdi_sensor_mask_size,
+ encp->enc_mcdi_sensor_maskp);
+ }
+}
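+
+/*
+ * Illustrative call sequence (esmp and values are assumed to be a DMA buffer
+ * of at least enc_mon_stat_dma_buf_size bytes and an EFX_MON_NSTATS-entry
+ * array respectively):
+ *
+ *	if (mcdi_mon_cfg_build(enp) == 0) {
+ *		(void) mcdi_mon_stats_update(enp, esmp, values);
+ *		mcdi_mon_cfg_free(enp);
+ *	}
+ */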
+
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
diff --git a/drivers/net/sfc/base/mcdi_mon.h b/drivers/net/sfc/base/mcdi_mon.h
new file mode 100644
index 00000000..e07b5280
--- /dev/null
+++ b/drivers/net/sfc/base/mcdi_mon.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MCDI_MON_H
+#define _SYS_MCDI_MON_H
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if EFSYS_OPT_MON_MCDI
+
+#if EFSYS_OPT_MON_STATS
+
+ __checkReturn efx_rc_t
+mcdi_mon_cfg_build(
+ __in efx_nic_t *enp);
+
+ void
+mcdi_mon_cfg_free(
+ __in efx_nic_t *enp);
+
+
+extern __checkReturn efx_rc_t
+mcdi_mon_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_mon_stat_t *idp,
+ __out efx_mon_stat_value_t *valuep);
+
+extern __checkReturn efx_rc_t
+mcdi_mon_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MON_NSTATS) efx_mon_stat_value_t *values);
+
+#endif /* EFSYS_OPT_MON_STATS */
+
+#endif /* EFSYS_OPT_MON_MCDI */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MCDI_MON_H */
diff --git a/drivers/net/sfc/base/medford_impl.h b/drivers/net/sfc/base/medford_impl.h
new file mode 100644
index 00000000..de2f5cf0
--- /dev/null
+++ b/drivers/net/sfc/base/medford_impl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_MEDFORD_IMPL_H
+#define _SYS_MEDFORD_IMPL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary
+ *
+ * FIXME: Is this the same on Medford as Huntington?
+ */
+#define MEDFORD_RX_WPTR_ALIGN 8
+
+
+
+#ifndef ER_EZ_TX_PIOBUF_SIZE
+#define ER_EZ_TX_PIOBUF_SIZE 4096
+#endif
+
+
+#define MEDFORD_PIOBUF_NBUFS (16)
+#define MEDFORD_PIOBUF_SIZE (ER_EZ_TX_PIOBUF_SIZE)
+
+#define MEDFORD_MIN_PIO_ALLOC_SIZE (MEDFORD_PIOBUF_SIZE / 32)
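+
+/*
+ * With the default ER_EZ_TX_PIOBUF_SIZE of 4096 bytes this provides 16 PIO
+ * buffers of 4096 bytes each, with a minimum allocation granule of
+ * 4096 / 32 = 128 bytes.
+ */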
+
+
+extern __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_MEDFORD_IMPL_H */
diff --git a/drivers/net/sfc/base/medford_nic.c b/drivers/net/sfc/base/medford_nic.c
new file mode 100644
index 00000000..07afac1e
--- /dev/null
+++ b/drivers/net/sfc/base/medford_nic.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (c) 2015-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+
+#if EFSYS_OPT_MEDFORD
+
+static __checkReturn efx_rc_t
+efx_mcdi_get_rxdp_config(
+ __in efx_nic_t *enp,
+ __out uint32_t *end_paddingp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_RXDP_CONFIG_IN_LEN,
+ MC_CMD_GET_RXDP_CONFIG_OUT_LEN)];
+ uint32_t end_padding;
+ efx_rc_t rc;
+
+ memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
+ /* RX DMA end padding is disabled */
+ end_padding = 0;
+ } else {
+ switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
+ GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
+ end_padding = 64;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
+ end_padding = 128;
+ break;
+ case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
+ end_padding = 256;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail2;
+ }
+ }
+
+ *end_paddingp = end_padding;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+medford_nic_get_required_pcie_bandwidth(
+ __in efx_nic_t *enp,
+ __out uint32_t *bandwidth_mbpsp)
+{
+ uint32_t port_modes;
+ uint32_t current_mode;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
+ &current_mode)) != 0) {
+ /* No port mode info available. */
+ bandwidth = 0;
+ goto out;
+ }
+
+ if ((rc = ef10_nic_get_port_mode_bandwidth(current_mode,
+ &bandwidth)) != 0)
+ goto fail1;
+
+out:
+ *bandwidth_mbpsp = bandwidth;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+medford_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6] = { 0 };
+ uint32_t board_type = 0;
+ ef10_link_state_t els;
+ efx_port_t *epp = &(enp->en_port);
+ uint32_t port;
+ uint32_t pf;
+ uint32_t vf;
+ uint32_t mask;
+ uint32_t sysclk, dpcpu_clk;
+ uint32_t base, nvec;
+ uint32_t end_padding;
+ uint32_t bandwidth;
+ efx_rc_t rc;
+
+ /*
+ * FIXME: Likely to be incomplete and incorrect.
+ * Parts of this should be shared with Huntington.
+ */
+
+ if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
+ goto fail1;
+
+ /*
+ * NOTE: The MCDI protocol numbers ports from zero.
+ * The common code MCDI interface numbers ports from one.
+ */
+ emip->emi_port = port + 1;
+
+ if ((rc = ef10_external_port_mapping(enp, port,
+ &encp->enc_external_port)) != 0)
+ goto fail2;
+
+ /*
+ * Get PCIe function number from firmware (used for
+ * per-function privilege and dynamic config info).
+ * - PCIe PF: pf = PF number, vf = 0xffff.
+ * - PCIe VF: pf = parent PF, vf = VF number.
+ */
+ if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0)
+ goto fail3;
+
+ encp->enc_pf = pf;
+ encp->enc_vf = vf;
+
+ /* MAC address for this function */
+ if (EFX_PCI_FUNCTION_IS_PF(encp)) {
+ rc = efx_mcdi_get_mac_address_pf(enp, mac_addr);
+#if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC
+ /* Disable static config checking for Medford NICs, ONLY
+ * for manufacturing test and setup at the factory, to
+ * allow the static config to be installed.
+ */
+#else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ if ((rc == 0) && (mac_addr[0] & 0x02)) {
+ /*
+ * If the static config does not include a global MAC
+ * address pool then the board may return a locally
+ * administered MAC address (this should only happen on
+ * incorrectly programmed boards).
+ */
+ rc = EINVAL;
+ }
+#endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */
+ } else {
+ rc = efx_mcdi_get_mac_address_vf(enp, mac_addr);
+ }
+ if (rc != 0)
+ goto fail4;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ /* Board configuration */
+ rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL);
+ if (rc != 0) {
+ /* Unprivileged functions may not be able to read board cfg */
+ if (rc == EACCES)
+ board_type = 0;
+ else
+ goto fail5;
+ }
+
+ encp->enc_board_type = board_type;
+ encp->enc_clk_mult = 1; /* not used for Medford */
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = ef10_phy_get_link(enp, &els)) != 0)
+ goto fail7;
+ epp->ep_default_adv_cap_mask = els.els_adv_cap_mask;
+ epp->ep_adv_cap_mask = els.els_adv_cap_mask;
+
+ /*
+ * Enable firmware workarounds for hardware errata.
+ * Expected responses are:
+ * - 0 (zero):
+ * Success: workaround enabled or disabled as requested.
+ * - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
+ * Firmware does not support the MC_CMD_WORKAROUND request.
+ * (assume that the workaround is not supported).
+ * - MC_CMD_ERR_ENOENT (reported as ENOENT):
+ * Firmware does not support the requested workaround.
+ * - MC_CMD_ERR_EPERM (reported as EACCES):
+ * Unprivileged function cannot enable/disable workarounds.
+ *
+ * See efx_mcdi_request_errcode() for MCDI error translations.
+ */
+
+
+ if (EFX_PCI_FUNCTION_IS_VF(encp)) {
+ /*
+ * Interrupt testing does not work for VFs. See bug50084.
+ * FIXME: Does this still apply to Medford?
+ */
+ encp->enc_bug41750_workaround = B_TRUE;
+ }
+
+ /* Chained multicast is always enabled on Medford */
+ encp->enc_bug26807_workaround = B_TRUE;
+
+ /*
+ * If the bug61265 workaround is enabled, then interrupt holdoff timers
+ * cannot be controlled by timer table writes, so MCDI must be used
+ * (timer table writes can still be used for wakeup timers).
+ */
+ rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG61265, B_TRUE,
+ NULL);
+ if ((rc == 0) || (rc == EACCES))
+ encp->enc_bug61265_workaround = B_TRUE;
+ else if ((rc == ENOTSUP) || (rc == ENOENT))
+ encp->enc_bug61265_workaround = B_FALSE;
+ else
+ goto fail8;
+
+ /* Get clock frequencies (in MHz). */
+ if ((rc = efx_mcdi_get_clock(enp, &sysclk, &dpcpu_clk)) != 0)
+ goto fail9;
+
+ /*
+ * The Medford timer quantum is 1536 dpcpu_clk cycles, documented for
+ * the EV_TMR_VAL field of EV_TIMER_TBL. Scale for MHz and ns units.
+ */
+ encp->enc_evq_timer_quantum_ns = 1536000UL / dpcpu_clk; /* 1536 cycles */
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
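+ /*
+ * Worked example for the calculation above (clock value assumed purely
+ * for illustration): with dpcpu_clk = 1000 MHz the quantum is
+ * 1536000 / 1000 = 1536 ns, i.e. 1536 cycles of a 1 ns clock; the
+ * maximum period is that quantum shifted left by the timer value field
+ * width, converted from ns to us.
+ */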
+
+ /* Check capabilities of running datapath firmware */
+ if ((rc = ef10_get_datapath_caps(enp)) != 0)
+ goto fail10;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+
+ /* Get the RX DMA end padding alignment configuration */
+ if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
+ if (rc != EACCES)
+ goto fail11;
+
+ /* Assume largest tail padding size supported by hardware */
+ end_padding = 256;
+ }
+ encp->enc_rx_buf_align_end = end_padding;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
+ /* No boundary crossing limits */
+ encp->enc_tx_dma_desc_boundary = 0;
+
+ /*
+ * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
+ * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
+ * resources (allocated to this PCIe function), which is zero until
+ * after we have allocated VIs.
+ */
+ encp->enc_evq_limit = 1024;
+ encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
+ encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;
+
+ /*
+ * The maximum supported transmit queue size is 2048. TXQs with 4096
+ * descriptors are not supported as the top bit is used for vfifo
+ * stuffing.
+ */
+ encp->enc_txq_max_ndescs = 2048;
+
+ encp->enc_buftbl_limit = 0xFFFFFFFF;
+
+ encp->enc_piobuf_limit = MEDFORD_PIOBUF_NBUFS;
+ encp->enc_piobuf_size = MEDFORD_PIOBUF_SIZE;
+ encp->enc_piobuf_min_alloc_size = MEDFORD_MIN_PIO_ALLOC_SIZE;
+
+ /*
+ * Get the current privilege mask. Note that this may be modified
+ * dynamically, so this value is informational only. DO NOT use
+ * the privilege mask to check for sufficient privileges, as that
+ * can result in time-of-check/time-of-use bugs.
+ */
+ if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0)
+ goto fail12;
+ encp->enc_privilege_mask = mask;
+
+ /* Get interrupt vector limits */
+ if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) {
+ if (EFX_PCI_FUNCTION_IS_PF(encp))
+ goto fail13;
+
+ /* Ignore error (cannot query vector limits from a VF). */
+ base = 0;
+ nvec = 1024;
+ }
+ encp->enc_intr_vec_base = base;
+ encp->enc_intr_limit = nvec;
+
+ /*
+ * Maximum offset into the frame at which the TCP header can start for
+ * firmware-assisted TSO to work.
+ */
+ encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;
+
+ /*
+ * Medford stores a single global copy of VPD, not per-PF as on
+ * Huntington.
+ */
+ encp->enc_vpd_is_global = B_TRUE;
+
+ rc = medford_nic_get_required_pcie_bandwidth(enp, &bandwidth);
+ if (rc != 0)
+ goto fail14;
+ encp->enc_required_pcie_bandwidth_mbps = bandwidth;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
+
+ return (0);
+
+fail14:
+ EFSYS_PROBE(fail14);
+fail13:
+ EFSYS_PROBE(fail13);
+fail12:
+ EFSYS_PROBE(fail12);
+fail11:
+ EFSYS_PROBE(fail11);
+fail10:
+ EFSYS_PROBE(fail10);
+fail9:
+ EFSYS_PROBE(fail9);
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_MEDFORD */
diff --git a/drivers/net/sfc/base/siena_flash.h b/drivers/net/sfc/base/siena_flash.h
new file mode 100644
index 00000000..e2700554
--- /dev/null
+++ b/drivers/net/sfc/base/siena_flash.h
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2007-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_FLASH_H
+#define _SYS_SIENA_FLASH_H
+
+#pragma pack(1)
+
+/* Fixed locations near the start of flash (which may be in the internal PHY
+ * firmware header) point to the boot header.
+ *
+ * - parsed by MC boot ROM and firmware
+ * - reserved (but not parsed) by PHY firmware
+ * - opaque to driver
+ */
+
+#define SIENA_MC_BOOT_PHY_FW_HDR_LEN (0x20)
+
+#define SIENA_MC_BOOT_PTR_LOCATION (0x18) /* First thing we try to boot */
+#define SIENA_MC_BOOT_ALT_PTR_LOCATION (0x1c) /* Alternative if that fails */
+
+#define SIENA_MC_BOOT_HDR_LEN (0x200)
+
+#define SIENA_MC_BOOT_MAGIC (0x51E4A001)
+#define SIENA_MC_BOOT_VERSION (1)
+
+
+/*
+ * Structures supporting an arbitrary number of binary blobs in the flash
+ * image, intended to house code and tables for the satellite CPUs.
+ */
+/* Thanks to random.org for: */
+#define BLOBS_HEADER_MAGIC (0xBDA3BBD4)
+#define BLOB_HEADER_MAGIC (0xA1478A91)
+
+typedef struct blobs_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t no_of_blobs;
+} blobs_hdr_t;
+
+typedef struct blob_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic;
+ efx_dword_t cpu_type;
+ efx_dword_t build_variant;
+ efx_dword_t offset;
+ efx_dword_t length;
+ efx_dword_t checksum;
+} blob_hdr_t;
+
+#define BLOB_CPU_TYPE_TXDI_TEXT (0)
+#define BLOB_CPU_TYPE_RXDI_TEXT (1)
+#define BLOB_CPU_TYPE_TXDP_TEXT (2)
+#define BLOB_CPU_TYPE_RXDP_TEXT (3)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT (4)
+#define BLOB_CPU_TYPE_RXHRSL_HR_LUT_CFG (5)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT (6)
+#define BLOB_CPU_TYPE_TXHRSL_HR_LUT_CFG (7)
+#define BLOB_CPU_TYPE_RXHRSL_HR_PGM (8)
+#define BLOB_CPU_TYPE_RXHRSL_SL_PGM (9)
+#define BLOB_CPU_TYPE_TXHRSL_HR_PGM (10)
+#define BLOB_CPU_TYPE_TXHRSL_SL_PGM (11)
+#define BLOB_CPU_TYPE_RXDI_VTBL0 (12)
+#define BLOB_CPU_TYPE_TXDI_VTBL0 (13)
+#define BLOB_CPU_TYPE_RXDI_VTBL1 (14)
+#define BLOB_CPU_TYPE_TXDI_VTBL1 (15)
+#define BLOB_CPU_TYPE_DUMPSPEC (32)
+#define BLOB_CPU_TYPE_MC_XIP (33)
+
+#define BLOB_CPU_TYPE_INVALID (31)
+
+/*
+ * The upper four bits of the CPU type field specify the compression
+ * algorithm used for this blob.
+ */
+#define BLOB_COMPRESSION_MASK (0xf0000000)
+#define BLOB_CPU_TYPE_MASK (0x0fffffff)
+
+#define BLOB_COMPRESSION_NONE (0x00000000) /* Stored as is */
+#define BLOB_COMPRESSION_LZ (0x10000000) /* see lib/lzdecoder.c */
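+
+/*
+ * Example decode (value chosen for illustration only): a cpu_type field of
+ * 0x10000002 gives (cpu_type & BLOB_COMPRESSION_MASK) == BLOB_COMPRESSION_LZ
+ * and (cpu_type & BLOB_CPU_TYPE_MASK) == BLOB_CPU_TYPE_TXDP_TEXT.
+ */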
+
+typedef struct siena_mc_boot_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_BOOT_MAGIC */
+ efx_word_t hdr_version; /* this structure definition is version 1 */
+ efx_byte_t board_type;
+ efx_byte_t firmware_version_a;
+ efx_byte_t firmware_version_b;
+ efx_byte_t firmware_version_c;
+ efx_word_t checksum; /* of whole header area + firmware image */
+ efx_word_t firmware_version_d;
+ efx_byte_t mcfw_subtype;
+ efx_byte_t generation; /* Valid for medford, SBZ for earlier chips */
+ efx_dword_t firmware_text_offset; /* offset to firmware .text */
+ efx_dword_t firmware_text_size; /* length of firmware .text, in bytes */
+ efx_dword_t firmware_data_offset; /* offset to firmware .data */
+ efx_dword_t firmware_data_size; /* length of firmware .data, in bytes */
+ efx_byte_t spi_rate; /* SPI rate for reading image, 0 is BootROM default */
+ efx_byte_t spi_phase_adj; /* SPI SDO/SCL phase adjustment, 0 is default (no adj) */
+ efx_word_t xpm_sector; /* The sector that contains the key, or 0xffff if unsigned (medford) SBZ (earlier) */
+ efx_dword_t reserved_c[7]; /* (set to 0) */
+} siena_mc_boot_hdr_t;
+
+#define SIENA_MC_BOOT_HDR_PADDING \
+ (SIENA_MC_BOOT_HDR_LEN - sizeof(siena_mc_boot_hdr_t))
+
+#define SIENA_MC_STATIC_CONFIG_MAGIC (0xBDCF5555)
+#define SIENA_MC_STATIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_static_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_STATIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t static_vpd_offset;
+ efx_dword_t static_vpd_length;
+ efx_dword_t capabilities;
+ efx_byte_t mac_addr_base[6];
+ efx_byte_t green_mode_cal; /* Green mode calibration result */
+ efx_byte_t green_mode_valid; /* Whether cal holds a valid value */
+ efx_word_t mac_addr_count;
+ efx_word_t mac_addr_stride;
+ efx_word_t calibrated_vref; /* Vref as measured during production */
+ efx_word_t adc_vref; /* Vref as read by ADC */
+ efx_dword_t reserved2[1]; /* (write as zero) */
+ efx_dword_t num_dbi_items;
+ struct {
+ efx_word_t addr;
+ efx_word_t byte_enables;
+ efx_dword_t value;
+ } dbi[];
+} siena_mc_static_config_hdr_t;
+
+/* This prefixes a valid XIP partition */
+#define XIP_PARTITION_MAGIC (0x51DEC0DE)
+
+#define SIENA_MC_DYNAMIC_CONFIG_MAGIC (0xBDCFDDDD)
+#define SIENA_MC_DYNAMIC_CONFIG_VERSION (0)
+
+typedef struct siena_mc_fw_version_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t fw_subtype;
+ efx_word_t version_w;
+ efx_word_t version_x;
+ efx_word_t version_y;
+ efx_word_t version_z;
+} siena_mc_fw_version_t;
+
+typedef struct siena_mc_dynamic_config_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_DYNAMIC_CONFIG_MAGIC */
+ efx_word_t length; /* of header area (i.e. not including VPD) */
+ efx_byte_t version;
+ efx_byte_t csum; /* over header area (i.e. not including VPD) */
+ efx_dword_t dynamic_vpd_offset;
+ efx_dword_t dynamic_vpd_length;
+ efx_dword_t num_fw_version_items;
+ siena_mc_fw_version_t fw_version[];
+} siena_mc_dynamic_config_hdr_t;
+
+#define SIENA_MC_EXPROM_SINGLE_MAGIC (0xAA55) /* little-endian uint16_t */
+
+#define SIENA_MC_EXPROM_COMBO_MAGIC (0xB0070102) /* little-endian uint32_t */
+#define SIENA_MC_EXPROM_COMBO_V2_MAGIC (0xB0070103) /* little-endian uint32_t */
+
+typedef struct siena_mc_combo_rom_hdr_s { /* GENERATED BY scripts/genfwdef */
+ efx_dword_t magic; /* = SIENA_MC_EXPROM_COMBO_MAGIC or SIENA_MC_EXPROM_COMBO_V2_MAGIC */
+ union {
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk0_off;/* infoblk offset */
+ efx_word_t infoblk1_off;/* infoblk offset */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v1;
+ struct {
+ efx_dword_t len1; /* length of first image */
+ efx_dword_t len2; /* length of second image */
+ efx_dword_t off1; /* offset of first byte to edit to combine images */
+ efx_dword_t off2; /* offset of second byte to edit to combine images */
+ efx_word_t infoblk_off;/* infoblk start offset */
+ efx_word_t infoblk_count;/* infoblk count */
+ efx_byte_t infoblk_len;/* length of space reserved for one infoblk structure */
+ efx_byte_t reserved[7];/* (set to 0) */
+ } v2;
+ } data;
+} siena_mc_combo_rom_hdr_t;
+
+#pragma pack()
+
+#endif /* _SYS_SIENA_FLASH_H */
diff --git a/drivers/net/sfc/base/siena_impl.h b/drivers/net/sfc/base/siena_impl.h
new file mode 100644
index 00000000..ea6de983
--- /dev/null
+++ b/drivers/net/sfc/base/siena_impl.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#ifndef _SYS_SIENA_IMPL_H
+#define _SYS_SIENA_IMPL_H
+
+#include "efx.h"
+#include "efx_regs.h"
+#include "efx_mcdi.h"
+#include "siena_flash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SIENA_NVRAM_CHUNK 0x80
+
+extern __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern void
+siena_nic_fini(
+ __in efx_nic_t *enp);
+
+extern void
+siena_nic_unprobe(
+ __in efx_nic_t *enp);
+
+#define SIENA_SRAM_ROWS 0x12000
+
+extern void
+siena_sram_init(
+ __in efx_nic_t *enp);
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func);
+
+#endif /* EFSYS_OPT_DIAG */
+
+#if EFSYS_OPT_MCDI
+
+extern __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp);
+
+extern void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len);
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length);
+
+extern efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp);
+
+extern void
+siena_mcdi_fini(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp);
+
+extern void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp);
+
+#endif /* EFSYS_OPT_MCDI */
+
+#if EFSYS_OPT_NVRAM || EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep);
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+#if EFSYS_OPT_DIAG
+
+extern __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_DIAG */
+
+extern __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4]);
+
+extern __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4]);
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_VPD
+
+extern __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep);
+
+extern __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp);
+
+extern __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size);
+
+extern void
+siena_vpd_fini(
+ __in efx_nic_t *enp);
+
+#endif /* EFSYS_OPT_VPD */
+
+typedef struct siena_link_state_s {
+ uint32_t sls_adv_cap_mask;
+ uint32_t sls_lp_cap_mask;
+ unsigned int sls_fcntl;
+ efx_link_mode_t sls_link_mode;
+#if EFSYS_OPT_LOOPBACK
+ efx_loopback_type_t sls_loopback;
+#endif
+ boolean_t sls_mac_up;
+} siena_link_state_t;
+
+extern void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp);
+
+extern __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t on);
+
+extern __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip);
+
+#if EFSYS_OPT_PHY_STATS
+
+extern void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat);
+
+extern __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat);
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+extern __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count);
+
+extern void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type);
+
+#endif /* EFSYS_OPT_BIST */
+
+extern __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep);
+
+extern __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp);
+
+extern __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp);
+
+extern __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu);
+
+#if EFSYS_OPT_LOOPBACK
+
+extern __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type);
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size);
+
+extern __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp);
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_SIENA_IMPL_H */
diff --git a/drivers/net/sfc/base/siena_mac.c b/drivers/net/sfc/base/siena_mac.c
new file mode 100644
index 00000000..29bbff8a
--- /dev/null
+++ b/drivers/net/sfc/base/siena_mac.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ __checkReturn efx_rc_t
+siena_mac_poll(
+ __in efx_nic_t *enp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_fcntl = sls.sls_fcntl;
+
+ *link_modep = sls.sls_link_mode;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_up(
+ __in efx_nic_t *enp,
+ __out boolean_t *mac_upp)
+{
+ siena_link_state_t sls;
+ efx_rc_t rc;
+
+ /*
+ * Because Siena doesn't *require* polling, we can't rely on
+ * siena_mac_poll() being executed to populate epp->ep_mac_up.
+ */
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail1;
+
+ *mac_upp = sls.sls_mac_up;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_mac_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_oword_t multicast_hash[2];
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_MAC_IN_LEN,
+ MC_CMD_SET_MAC_OUT_LEN),
+ MAX(MC_CMD_SET_MCAST_HASH_IN_LEN,
+ MC_CMD_SET_MCAST_HASH_OUT_LEN))];
+ unsigned int fcntl;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MAC;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MAC_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MAC_OUT_LEN;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_MTU, epp->ep_mac_pdu);
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_DRAIN, epp->ep_mac_drain ? 1 : 0);
+ EFX_MAC_ADDR_COPY(MCDI_IN2(req, uint8_t, SET_MAC_IN_ADDR),
+ epp->ep_mac_addr);
+ MCDI_IN_POPULATE_DWORD_2(req, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, !epp->ep_all_unicst,
+ SET_MAC_IN_REJECT_BRDCST, !epp->ep_brdcst);
+
+ if (epp->ep_fcntl_autoneg)
+ /* efx_fcntl_set() has already set the phy capabilities */
+ fcntl = MC_CMD_FCNTL_AUTO;
+ else if (epp->ep_fcntl & EFX_FCNTL_RESPOND)
+ fcntl = (epp->ep_fcntl & EFX_FCNTL_GENERATE)
+ ? MC_CMD_FCNTL_BIDIR
+ : MC_CMD_FCNTL_RESPOND;
+ else
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_IN_SET_DWORD(req, SET_MAC_IN_FCNTL, fcntl);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* Push multicast hash */
+
+ if (epp->ep_all_mulcst) {
+ /* A hash matching all multicast is all 1s */
+ EFX_SET_OWORD(multicast_hash[0]);
+ EFX_SET_OWORD(multicast_hash[1]);
+ } else if (epp->ep_mulcst) {
+ /* Use the hash set by the multicast list */
+ multicast_hash[0] = epp->ep_multicst_hash[0];
+ multicast_hash[1] = epp->ep_multicst_hash[1];
+ } else {
+ /* A hash matching no traffic is simply 0 */
+ EFX_ZERO_OWORD(multicast_hash[0]);
+ EFX_ZERO_OWORD(multicast_hash[1]);
+ }
+
+ /*
+ * Broadcast packets go through the multicast hash filter.
+ * The IEEE 802.3 CRC32 of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask (bit 0x7f in the
+ * second octword).
+ */
+ if (epp->ep_brdcst) {
+ /*
+ * NOTE: due to constant folding, some of this evaluates
+ * to null expressions, giving E_EXPR_NULL_EFFECT during
+ * lint on Illumos. No good way to fix this without
+ * explicit coding the individual word/bit setting.
+ * So just suppress lint for this one line.
+ */
+ /* LINTED */
+ EFX_SET_OWORD_BIT(multicast_hash[1], 0x7f);
+ }
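+
+ /*
+ * For reference: each efx_oword_t holds 128 bits, so hash bit 0xff
+ * (255) lands in multicast_hash[1] at bit 255 - 128 = 127 = 0x7f,
+ * which is the bit set above.
+ */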
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_MCAST_HASH;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_MCAST_HASH_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_MCAST_HASH_OUT_LEN;
+
+ memcpy(MCDI_IN2(req, uint8_t, SET_MCAST_HASH_IN_HASH0),
+ multicast_hash, sizeof (multicast_hash));
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#if EFSYS_OPT_LOOPBACK
+
+ __checkReturn efx_rc_t
+siena_mac_loopback_set(
+ __in efx_nic_t *enp,
+ __in efx_link_mode_t link_mode,
+ __in efx_loopback_type_t loopback_type)
+{
+ efx_port_t *epp = &(enp->en_port);
+ const efx_phy_ops_t *epop = epp->ep_epop;
+ efx_loopback_type_t old_loopback_type;
+ efx_link_mode_t old_loopback_link_mode;
+ efx_rc_t rc;
+
+ /* The PHY object handles this on Siena */
+ old_loopback_type = epp->ep_loopback_type;
+ old_loopback_link_mode = epp->ep_loopback_link_mode;
+ epp->ep_loopback_type = loopback_type;
+ epp->ep_loopback_link_mode = link_mode;
+
+ if ((rc = epop->epo_reconfigure(enp)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ epp->ep_loopback_type = old_loopback_type;
+ epp->ep_loopback_link_mode = old_loopback_link_mode;
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_LOOPBACK */
+
+#if EFSYS_OPT_MAC_STATS
+
+ __checkReturn efx_rc_t
+siena_mac_stats_get_mask(
+ __in efx_nic_t *enp,
+ __inout_bcount(mask_size) uint32_t *maskp,
+ __in size_t mask_size)
+{
+ const struct efx_mac_stats_range siena_stats[] = {
+ { EFX_MAC_RX_OCTETS, EFX_MAC_RX_GE_15XX_PKTS },
+ /* EFX_MAC_RX_ERRORS is not supported */
+ { EFX_MAC_RX_FCS_ERRORS, EFX_MAC_TX_EX_DEF_PKTS },
+ };
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(enp))
+
+ if ((rc = efx_mac_stats_mask_add_ranges(maskp, mask_size,
+ siena_stats, EFX_ARRAY_SIZE(siena_stats))) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#define SIENA_MAC_STAT_READ(_esmp, _field, _eqp) \
+ EFSYS_MEM_READQ((_esmp), (_field) * sizeof (efx_qword_t), _eqp)
+
+ __checkReturn efx_rc_t
+siena_mac_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_MAC_NSTATS) efsys_stat_t *stat,
+ __inout_opt uint32_t *generationp)
+{
+ efx_qword_t value;
+ efx_qword_t generation_start;
+ efx_qword_t generation_end;
+
+ _NOTE(ARGUNUSED(enp))
+
+ /* Read END first so we don't race with the MC */
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_END,
+ &generation_end);
+ EFSYS_MEM_READ_BARRIER();
+
+ /* TX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_CONTROL_PKTS, &value);
+ EFSYS_STAT_SUBR_QWORD(&(stat[EFX_MAC_TX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LT64_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_TX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_SGL_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_MULT_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_LATE_COLLISION_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_LATE_COL_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_DEFERRED_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_DEF_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS,
+ &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_TX_EX_DEF_PKTS]), &value);
+
+ /* RX */
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BYTES, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_OCTETS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_UNICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MULTICAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MULTICST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BROADCAST_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_BRDCST_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_PAUSE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_PAUSE_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_UNDERSIZE_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_64_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_LE_64_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_65_TO_127_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_65_TO_127_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_128_TO_255_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_128_TO_255_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_256_TO_511_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_256_TO_511_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_512_TO_1023_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_512_TO_1023_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_1024_TO_15XX_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_1024_TO_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_GTJUMBO_PKTS, &value);
+ EFSYS_STAT_INCR_QWORD(&(stat[EFX_MAC_RX_GE_15XX_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_BAD_FCS_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FCS_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_OVERFLOW_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_DROP_EVENTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_FALSE_CARRIER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_FALSE_CARRIER_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_SYMBOL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_ALIGN_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_ALIGN_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_INTERNAL_ERRORS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_JABBER_PKTS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_JABBER_PKTS]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_CHAR_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_CHAR_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_CHAR_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES01_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE0_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE1_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_LANES23_DISP_ERR, &value);
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE2_DISP_ERR]),
+ &(value.eq_dword[0]));
+ EFSYS_STAT_SET_DWORD(&(stat[EFX_MAC_RX_LANE3_DISP_ERR]),
+ &(value.eq_dword[1]));
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_MATCH_FAULT, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_MATCH_FAULT]), &value);
+
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_RX_NODESC_DROPS, &value);
+ EFSYS_STAT_SET_QWORD(&(stat[EFX_MAC_RX_NODESC_DROP_CNT]), &value);
+
+ EFSYS_DMA_SYNC_FOR_KERNEL(esmp, 0, EFX_MAC_STATS_SIZE);
+ EFSYS_MEM_READ_BARRIER();
+ SIENA_MAC_STAT_READ(esmp, MC_CMD_MAC_GENERATION_START,
+ &generation_start);
+
+ /* Check that we didn't read the stats in the middle of a DMA */
+	/* Not a good enough check? */
+ if (memcmp(&generation_start, &generation_end,
+ sizeof (generation_start)))
+ return (EAGAIN);
+
+ if (generationp)
+ *generationp = EFX_QWORD_FIELD(generation_start, EFX_DWORD_0);
+
+ return (0);
+}
+
+#endif /* EFSYS_OPT_MAC_STATS */
+
+ __checkReturn efx_rc_t
+siena_mac_pdu_get(
+ __in efx_nic_t *enp,
+ __out size_t *pdu)
+{
+ return (ENOTSUP);
+}
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_mcdi.c b/drivers/net/sfc/base/siena_mcdi.c
new file mode 100644
index 00000000..63c29fcb
--- /dev/null
+++ b/drivers/net/sfc/base/siena_mcdi.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2012-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA && EFSYS_OPT_MCDI
+
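+/*
+ * MCDI requests are exchanged through a shared memory window exposed via
+ * the MC_TREG_SMEM BAR table. The macros below pick the per-port PDU,
+ * doorbell and status locations; the >> 2 converts byte offsets into
+ * dword table indices.
+ */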
+#define	SIENA_MCDI_PDU(_emip)			\
+	(((_emip)->emi_port == 1)		\
+	? MC_SMEM_P0_PDU_OFST >> 2		\
+	: MC_SMEM_P1_PDU_OFST >> 2)
+
+#define	SIENA_MCDI_DOORBELL(_emip)		\
+	(((_emip)->emi_port == 1)		\
+	? MC_SMEM_P0_DOORBELL_OFST >> 2		\
+	: MC_SMEM_P1_DOORBELL_OFST >> 2)
+
+#define	SIENA_MCDI_STATUS(_emip)		\
+	(((_emip)->emi_port == 1)		\
+	? MC_SMEM_P0_STATUS_OFST >> 2		\
+	: MC_SMEM_P1_STATUS_OFST >> 2)
+
+
+ void
+siena_mcdi_send_request(
+ __in efx_nic_t *enp,
+ __in_bcount(hdr_len) void *hdrp,
+ __in size_t hdr_len,
+ __in_bcount(sdu_len) void *sdup,
+ __in size_t sdu_len)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t dword;
+ unsigned int pdur;
+ unsigned int dbr;
+ unsigned int pos;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+ dbr = SIENA_MCDI_DOORBELL(emip);
+
+ /* Write the header */
+ EFSYS_ASSERT3U(hdr_len, ==, sizeof (efx_dword_t));
+ dword = *(efx_dword_t *)hdrp;
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, pdur, &dword, B_TRUE);
+
+ /* Write the payload */
+ for (pos = 0; pos < sdu_len; pos += sizeof (efx_dword_t)) {
+ dword = *(efx_dword_t *)((uint8_t *)sdup + pos);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + 1 + (pos >> 2), &dword, B_FALSE);
+ }
+
+ /* Ring the doorbell */
+ EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xd004be11);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, dbr, &dword, B_FALSE);
+}
+
+ efx_rc_t
+siena_mcdi_poll_reboot(
+ __in efx_nic_t *enp)
+{
+#if 1
+ /*
+ * XXX Bug 25922, bug 26099: This function is not being used
+ * properly. Until its callers are fixed, it should always
+ * return 0.
+ */
+ _NOTE(ARGUNUSED(enp))
+ return (0);
+#else
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int rebootr;
+ efx_dword_t dword;
+ uint32_t value;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ rebootr = SIENA_MCDI_STATUS(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+ value = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+
+ if (value == 0)
+ return (0);
+
+ EFX_ZERO_DWORD(dword);
+ EFX_BAR_TBL_WRITED(enp, FR_CZ_MC_TREG_SMEM, rebootr, &dword, B_FALSE);
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return (EINTR);
+ else
+ return (EIO);
+#endif
+}
+
+extern __checkReturn boolean_t
+siena_mcdi_poll_response(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_dword_t hdr;
+ unsigned int pdur;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM, pdur, &hdr, B_FALSE);
+ return (EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE) ? B_TRUE : B_FALSE);
+}
+
+ void
+siena_mcdi_read_response(
+ __in efx_nic_t *enp,
+ __out_bcount(length) void *bufferp,
+ __in size_t offset,
+ __in size_t length)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int pdur;
+ unsigned int pos;
+ efx_dword_t data;
+
+ EFSYS_ASSERT(emip->emi_port == 1 || emip->emi_port == 2);
+ pdur = SIENA_MCDI_PDU(emip);
+
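+	/*
+	 * Copy the response out of the shared memory window one dword at a
+	 * time; the final copy is clamped so that an unaligned length is
+	 * honoured.
+	 */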
+ for (pos = 0; pos < length; pos += sizeof (efx_dword_t)) {
+ EFX_BAR_TBL_READD(enp, FR_CZ_MC_TREG_SMEM,
+ pdur + ((offset + pos) >> 2), &data, B_FALSE);
+ memcpy((uint8_t *)bufferp + pos, &data,
+ MIN(sizeof (data), length - pos));
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_init(
+ __in efx_nic_t *enp,
+ __in const efx_mcdi_transport_t *mtp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ efx_oword_t oword;
+ unsigned int portnum;
+ efx_rc_t rc;
+
+ _NOTE(ARGUNUSED(mtp))
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine the port number to use for MCDI */
+ EFX_BAR_READO(enp, FR_AZ_CS_DEBUG_REG, &oword);
+ portnum = EFX_OWORD_FIELD(oword, FRF_CZ_CS_PORT_NUM);
+
+ if (portnum == 0) {
+ /* Presumably booted from ROM; only MCDI port 1 will work */
+ emip->emi_port = 1;
+ } else if (portnum <= 2) {
+ emip->emi_port = portnum;
+ } else {
+ rc = EINVAL;
+ goto fail1;
+ }
+
+ /* Siena BootROM and firmware only support MCDIv1 */
+ emip->emi_max_version = 1;
+
+ /*
+ * Wipe the atomic reboot status so subsequent MCDI requests succeed.
+ * BOOT_STATUS is preserved so eno_nic_probe() can boot out of the
+ * assertion handler.
+ */
+ (void) siena_mcdi_poll_reboot(enp);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_mcdi_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ __checkReturn efx_rc_t
+siena_mcdi_feature_supported(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_feature_id_t id,
+ __out boolean_t *supportedp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ switch (id) {
+ case EFX_MCDI_FEATURE_FW_UPDATE:
+ case EFX_MCDI_FEATURE_LINK_CONTROL:
+ case EFX_MCDI_FEATURE_MACADDR_CHANGE:
+ case EFX_MCDI_FEATURE_MAC_SPOOFING:
+ *supportedp = B_TRUE;
+ break;
+ default:
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+/* Default timeout for MCDI command processing. */
+#define SIENA_MCDI_CMD_TIMEOUT_US (10 * 1000 * 1000)
+
+ void
+siena_mcdi_get_timeout(
+ __in efx_nic_t *enp,
+ __in efx_mcdi_req_t *emrp,
+ __out uint32_t *timeoutp)
+{
+ _NOTE(ARGUNUSED(enp, emrp))
+
+ *timeoutp = SIENA_MCDI_CMD_TIMEOUT_US;
+}
+
+
+#endif /* EFSYS_OPT_SIENA && EFSYS_OPT_MCDI */
diff --git a/drivers/net/sfc/base/siena_nic.c b/drivers/net/sfc/base/siena_nic.c
new file mode 100644
index 00000000..129b854b
--- /dev/null
+++ b/drivers/net/sfc/base/siena_nic.c
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+#include "mcdi_mon.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+static __checkReturn efx_rc_t
+siena_nic_get_partn_mask(
+ __in efx_nic_t *enp,
+ __out unsigned int *maskp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_NVRAM_TYPES_IN_LEN,
+ MC_CMD_NVRAM_TYPES_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_NVRAM_TYPES;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_NVRAM_TYPES_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_NVRAM_TYPES_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ *maskp = MCDI_OUT_DWORD(req, NVRAM_TYPES_OUT_TYPES);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+static __checkReturn efx_rc_t
+siena_board_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t mac_addr[6];
+ efx_dword_t capabilities;
+ uint32_t board_type;
+ uint32_t nevq, nrxq, ntxq;
+ efx_rc_t rc;
+
+ /* External port identifier using one-based port numbering */
+ encp->enc_external_port = (uint8_t)enp->en_mcdi.em_emip.emi_port;
+
+ /* Board configuration */
+ if ((rc = efx_mcdi_get_board_cfg(enp, &board_type,
+ &capabilities, mac_addr)) != 0)
+ goto fail1;
+
+ EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr);
+
+ encp->enc_board_type = board_type;
+
+ /*
+	 * The number of PFs on Siena cannot be determined via an MCDI
+	 * request, and deriving the value from the board type is not
+	 * straightforward, so 'enc_hw_pf_count' is set to 1.
+ */
+ encp->enc_hw_pf_count = 1;
+
+ /* Additional capabilities */
+ encp->enc_clk_mult = 1;
+ if (EFX_DWORD_FIELD(capabilities, MC_CMD_CAPABILITIES_TURBO)) {
+ enp->en_features |= EFX_FEATURE_TURBO;
+
+ if (EFX_DWORD_FIELD(capabilities,
+ MC_CMD_CAPABILITIES_TURBO_ACTIVE)) {
+ encp->enc_clk_mult = 2;
+ }
+ }
+
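+	/*
+	 * The event queue timer quantum is halved when the part runs in
+	 * turbo mode; the maximum timer period is bounded by the width of
+	 * the TC_TIMER_VAL register field.
+	 */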
+ encp->enc_evq_timer_quantum_ns =
+ EFX_EVQ_SIENA_TIMER_QUANTUM_NS / encp->enc_clk_mult;
+ encp->enc_evq_timer_max_us = (encp->enc_evq_timer_quantum_ns <<
+ FRF_CZ_TC_TIMER_VAL_WIDTH) / 1000;
+
+ /* When hash header insertion is enabled, Siena inserts 16 bytes */
+ encp->enc_rx_prefix_size = 16;
+
+ /* Alignment for receive packet DMA buffers */
+ encp->enc_rx_buf_align_start = 1;
+ encp->enc_rx_buf_align_end = 1;
+
+ /* Alignment for WPTR updates */
+ encp->enc_rx_push_align = 1;
+
+ encp->enc_tx_dma_desc_size_max = EFX_MASK32(FSF_AZ_TX_KER_BYTE_COUNT);
+ /* Fragments must not span 4k boundaries. */
+ encp->enc_tx_dma_desc_boundary = 4096;
+
+ /* Resource limits */
+ rc = efx_mcdi_get_resource_limits(enp, &nevq, &nrxq, &ntxq);
+ if (rc != 0) {
+ if (rc != ENOTSUP)
+ goto fail2;
+
+ nevq = 1024;
+ nrxq = EFX_RXQ_LIMIT_TARGET;
+ ntxq = EFX_TXQ_LIMIT_TARGET;
+ }
+ encp->enc_evq_limit = nevq;
+ encp->enc_rxq_limit = MIN(EFX_RXQ_LIMIT_TARGET, nrxq);
+ encp->enc_txq_limit = MIN(EFX_TXQ_LIMIT_TARGET, ntxq);
+
+ encp->enc_txq_max_ndescs = 4096;
+
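+	/*
+	 * The buffer table is carved out of SRAM: reserve rows for the TX
+	 * and RX descriptor caches and expose the remainder to the driver.
+	 */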
+ encp->enc_buftbl_limit = SIENA_SRAM_ROWS -
+ (encp->enc_txq_limit * EFX_TXQ_DC_NDESCS(EFX_TXQ_DC_SIZE)) -
+ (encp->enc_rxq_limit * EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
+ encp->enc_fw_assisted_tso_v2_n_contexts = 0;
+ encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
+ encp->enc_rx_packed_stream_supported = B_FALSE;
+ encp->enc_rx_var_packed_stream_supported = B_FALSE;
+
+ /* Siena supports two 10G ports, and 8 lanes of PCIe Gen2 */
+ encp->enc_required_pcie_bandwidth_mbps = 2 * 10000;
+ encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN2;
+
+ encp->enc_fw_verified_nvram_update_required = B_FALSE;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn efx_rc_t
+siena_phy_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_rc_t rc;
+
+ /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */
+ if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0)
+ goto fail1;
+
+#if EFSYS_OPT_PHY_STATS
+ /* Convert the MCDI statistic mask into the EFX_PHY_STAT mask */
+ siena_phy_decode_stats(enp, encp->enc_mcdi_phy_stat_mask,
+ NULL, &encp->enc_phy_stat_mask, NULL);
+#endif /* EFSYS_OPT_PHY_STATS */
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_probe(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ siena_link_state_t sls;
+ unsigned int mask;
+ efx_oword_t oword;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Test BIU */
+ if ((rc = efx_nic_biu_test(enp)) != 0)
+ goto fail1;
+
+ /* Clear the region register */
+ EFX_POPULATE_OWORD_4(oword,
+ FRF_AZ_ADR_REGION0, 0,
+ FRF_AZ_ADR_REGION1, (1 << 16),
+ FRF_AZ_ADR_REGION2, (2 << 16),
+ FRF_AZ_ADR_REGION3, (3 << 16));
+ EFX_BAR_WRITEO(enp, FR_AZ_ADR_REGION_REG, &oword);
+
+	/* Read and clear any assertion state */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail2;
+
+ /* Exit the assertion handler */
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail3;
+
+ /* Wrestle control from the BMC */
+ if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
+ goto fail4;
+
+ if ((rc = siena_board_cfg(enp)) != 0)
+ goto fail5;
+
+ if ((rc = siena_phy_cfg(enp)) != 0)
+ goto fail6;
+
+ /* Obtain the default PHY advertised capabilities */
+ if ((rc = siena_nic_reset(enp)) != 0)
+ goto fail7;
+ if ((rc = siena_phy_get_link(enp, &sls)) != 0)
+ goto fail8;
+ epp->ep_default_adv_cap_mask = sls.sls_adv_cap_mask;
+ epp->ep_adv_cap_mask = sls.sls_adv_cap_mask;
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+ if ((rc = siena_nic_get_partn_mask(enp, &mask)) != 0)
+ goto fail9;
+ enp->en_u.siena.enu_partn_mask = mask;
+#endif
+
+#if EFSYS_OPT_MAC_STATS
+ /* Wipe the MAC statistics */
+ if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
+ goto fail10;
+#endif
+
+#if EFSYS_OPT_LOOPBACK
+ if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
+ goto fail11;
+#endif
+
+#if EFSYS_OPT_MON_STATS
+ if ((rc = mcdi_mon_cfg_build(enp)) != 0)
+ goto fail12;
+#endif
+
+ encp->enc_features = enp->en_features;
+
+ return (0);
+
+#if EFSYS_OPT_MON_STATS
+fail12:
+ EFSYS_PROBE(fail12);
+#endif
+#if EFSYS_OPT_LOOPBACK
+fail11:
+ EFSYS_PROBE(fail11);
+#endif
+#if EFSYS_OPT_MAC_STATS
+fail10:
+ EFSYS_PROBE(fail10);
+#endif
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+fail9:
+ EFSYS_PROBE(fail9);
+#endif
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_reset(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* siena_nic_reset() is called to recover from BADASSERT failures. */
+ if ((rc = efx_mcdi_read_assertion(enp)) != 0)
+ goto fail1;
+ if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
+ goto fail2;
+
+ /*
+	 * Bug24908: ENTITY_RESET_IN_LEN is non-zero but zero may be supplied
+ * for backwards compatibility with PORT_RESET_IN_LEN.
+ */
+ EFX_STATIC_ASSERT(MC_CMD_ENTITY_RESET_OUT_LEN == 0);
+
+ req.emr_cmd = MC_CMD_ENTITY_RESET;
+ req.emr_in_buf = NULL;
+ req.emr_in_length = 0;
+ req.emr_out_buf = NULL;
+ req.emr_out_length = 0;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (0);
+}
+
+static void
+siena_nic_rx_cfg(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ /*
+ * RX_INGR_EN is always enabled on Siena, because we rely on
+	 * the RX parser to be resilient to missing SOP/EOP.
+ */
+ EFX_BAR_READO(enp, FR_AZ_RX_CFG_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_BZ_RX_INGR_EN, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_CFG_REG, &oword);
+
+ /* Disable parsing of additional 802.1Q in Q packets */
+ EFX_BAR_READO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+ EFX_SET_OWORD_FIELD(oword, FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_FILTER_CTL_REG, &oword);
+}
+
+static void
+siena_nic_usrev_dis(
+ __in efx_nic_t *enp)
+{
+ efx_oword_t oword;
+
+ EFX_POPULATE_OWORD_1(oword, FRF_CZ_USREV_DIS, 1);
+ EFX_BAR_WRITEO(enp, FR_CZ_USR_EV_CFG, &oword);
+}
+
+ __checkReturn efx_rc_t
+siena_nic_init(
+ __in efx_nic_t *enp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);
+
+ /* Enable reporting of some events (e.g. link change) */
+ if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
+ goto fail1;
+
+ siena_sram_init(enp);
+
+ /* Configure Siena's RX block */
+ siena_nic_rx_cfg(enp);
+
+ /* Disable USR_EVents for now */
+ siena_nic_usrev_dis(enp);
+
+ /* bug17057: Ensure set_link is called */
+ if ((rc = siena_phy_reconfigure(enp)) != 0)
+ goto fail2;
+
+ enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V1;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_nic_fini(
+ __in efx_nic_t *enp)
+{
+ _NOTE(ARGUNUSED(enp))
+}
+
+ void
+siena_nic_unprobe(
+ __in efx_nic_t *enp)
+{
+#if EFSYS_OPT_MON_STATS
+ mcdi_mon_cfg_free(enp);
+#endif /* EFSYS_OPT_MON_STATS */
+ (void) efx_mcdi_drv_attach(enp, B_FALSE);
+}
+
+#if EFSYS_OPT_DIAG
+
+static efx_register_set_t __siena_registers[] = {
+ { FR_AZ_ADR_REGION_REG_OFST, 0, 1 },
+ { FR_CZ_USR_EV_CFG_OFST, 0, 1 },
+ { FR_AZ_RX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_TX_RESERVED_REG_OFST, 0, 1 },
+ { FR_AZ_SRM_TX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_CFG_REG_OFST, 0, 1 },
+ { FR_AZ_RX_DC_PF_WM_REG_OFST, 0, 1 },
+ { FR_AZ_DP_CTRL_REG_OFST, 0, 1 },
+ { FR_BZ_RX_RSS_TKEY_REG_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG1_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG2_OFST, 0, 1},
+ { FR_CZ_RX_RSS_IPV6_REG3_OFST, 0, 1}
+};
+
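+/*
+ * Four mask dwords per register above; siena_nic_register_test() packs
+ * them into each 128-bit oword mask, and a static assert there keeps the
+ * two arrays in step.
+ */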
+static const uint32_t __siena_register_masks[] = {
+ 0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF,
+ 0x000103FF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000,
+ 0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF,
+ 0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF,
+ 0x001FFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000003, 0x00000000, 0x00000000, 0x00000000,
+ 0x000003FF, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000FFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000
+};
+
+static efx_register_set_t __siena_tables[] = {
+ { FR_AZ_RX_FILTER_TBL0_OFST, FR_AZ_RX_FILTER_TBL0_STEP,
+ FR_AZ_RX_FILTER_TBL0_ROWS },
+ { FR_CZ_RX_MAC_FILTER_TBL0_OFST, FR_CZ_RX_MAC_FILTER_TBL0_STEP,
+ FR_CZ_RX_MAC_FILTER_TBL0_ROWS },
+ { FR_AZ_RX_DESC_PTR_TBL_OFST,
+ FR_AZ_RX_DESC_PTR_TBL_STEP, FR_CZ_RX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TX_DESC_PTR_TBL_OFST,
+ FR_AZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
+ { FR_AZ_TIMER_TBL_OFST, FR_AZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS },
+ { FR_CZ_TX_FILTER_TBL0_OFST,
+ FR_CZ_TX_FILTER_TBL0_STEP, FR_CZ_TX_FILTER_TBL0_ROWS },
+ { FR_CZ_TX_MAC_FILTER_TBL0_OFST,
+ FR_CZ_TX_MAC_FILTER_TBL0_STEP, FR_CZ_TX_MAC_FILTER_TBL0_ROWS }
+};
+
+static const uint32_t __siena_table_masks[] = {
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000003FF,
+ 0xFFFF0FFF, 0xFFFFFFFF, 0x00000E7F, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x01800000, 0x00000000,
+ 0xFFFFFFFE, 0x0FFFFFFF, 0x0C000000, 0x00000000,
+ 0x3FFFFFFF, 0x00000000, 0x00000000, 0x00000000,
+ 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x000013FF,
+ 0xFFFF07FF, 0xFFFFFFFF, 0x0000007F, 0x00000000,
+};
+
+ __checkReturn efx_rc_t
+siena_nic_register_test(
+ __in efx_nic_t *enp)
+{
+ efx_register_set_t *rsp;
+ const uint32_t *dwordp;
+ unsigned int nitems;
+ unsigned int count;
+ efx_rc_t rc;
+
+ /* Fill out the register mask entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_register_masks)
+ == EFX_ARRAY_SIZE(__siena_registers) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_registers);
+ dwordp = __siena_register_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_registers + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ /* Fill out the register table entries */
+ EFX_STATIC_ASSERT(EFX_ARRAY_SIZE(__siena_table_masks)
+ == EFX_ARRAY_SIZE(__siena_tables) * 4);
+
+ nitems = EFX_ARRAY_SIZE(__siena_tables);
+ dwordp = __siena_table_masks;
+ for (count = 0; count < nitems; ++count) {
+ rsp = __siena_tables + count;
+ rsp->mask.eo_u32[0] = *dwordp++;
+ rsp->mask.eo_u32[1] = *dwordp++;
+ rsp->mask.eo_u32[2] = *dwordp++;
+ rsp->mask.eo_u32[3] = *dwordp++;
+ }
+
+ if ((rc = efx_nic_test_registers(enp, __siena_registers,
+ EFX_ARRAY_SIZE(__siena_registers))) != 0)
+ goto fail1;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_ALTERNATE,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail2;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BYTE_CHANGING,
+ EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail3;
+
+ if ((rc = efx_nic_test_tables(enp, __siena_tables,
+ EFX_PATTERN_BIT_SWEEP, EFX_ARRAY_SIZE(__siena_tables))) != 0)
+ goto fail4;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_nvram.c b/drivers/net/sfc/base/siena_nvram.c
new file mode 100644
index 00000000..af4cf172
--- /dev/null
+++ b/drivers/net/sfc/base/siena_nvram.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+#if EFSYS_OPT_VPD || EFSYS_OPT_NVRAM
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_size(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *sizep)
+{
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = efx_mcdi_nvram_info(enp, partn, sizep,
+ NULL, NULL, NULL)) != 0) {
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_lock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_update_start(enp, partn)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_read(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
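+	/*
+	 * NVRAM reads over MCDI are limited in size, so issue the request in
+	 * SIENA_NVRAM_CHUNK sized pieces.
+	 */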
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_read(enp, partn, offset, data, chunk,
+ MC_CMD_NVRAM_READ_IN_V2_DEFAULT)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_erase(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __in size_t size)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_nvram_erase(enp, partn, offset, size)) != 0) {
+ goto fail1;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_write(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in unsigned int offset,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ size_t chunk;
+ efx_rc_t rc;
+
+ while (size > 0) {
+ chunk = MIN(size, SIENA_NVRAM_CHUNK);
+
+ if ((rc = efx_mcdi_nvram_write(enp, partn, offset,
+ data, chunk)) != 0) {
+ goto fail1;
+ }
+
+ size -= chunk;
+ data += chunk;
+ offset += chunk;
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_unlock(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ boolean_t reboot;
+ efx_rc_t rc;
+
+ /*
+ * Reboot into the new image only for PHYs. The driver has to
+ * explicitly cope with an MC reboot after a firmware update.
+ */
+ reboot = (partn == MC_CMD_NVRAM_TYPE_PHY_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_PHY_PORT1 ||
+ partn == MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO);
+
+ rc = efx_mcdi_nvram_update_finish(enp, partn, reboot, NULL);
+ if (rc != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_VPD || EFSYS_OPT_NVRAM */
+
+#if EFSYS_OPT_NVRAM
+
+typedef struct siena_parttbl_entry_s {
+ unsigned int partn;
+ unsigned int port;
+ efx_nvram_type_t nvtype;
+} siena_parttbl_entry_t;
+
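+/*
+ * Map each exposed EFX_NVRAM_* type to the underlying MCDI NVRAM partition
+ * for the port it is visible from; partitions shared between ports appear
+ * once per port.
+ */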
+static siena_parttbl_entry_t siena_parttbl[] = {
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 1, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO, 2, EFX_NVRAM_NULLPHY},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 1, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW, 2, EFX_NVRAM_MC_FIRMWARE},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 1, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_MC_FW_BACKUP, 2, EFX_NVRAM_MC_GOLDEN},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 1, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM, 2, EFX_NVRAM_BOOTROM},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0, 1, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1, 2, EFX_NVRAM_BOOTROM_CFG},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT0, 1, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_PHY_PORT1, 2, EFX_NVRAM_PHY},
+ {MC_CMD_NVRAM_TYPE_FPGA, 1, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA, 2, EFX_NVRAM_FPGA},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 1, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FPGA_BACKUP, 2, EFX_NVRAM_FPGA_BACKUP},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 1, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_FC_FW, 2, EFX_NVRAM_FCFW},
+ {MC_CMD_NVRAM_TYPE_CPLD, 1, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_CPLD, 2, EFX_NVRAM_CPLD},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 1, EFX_NVRAM_LICENSE},
+ {MC_CMD_NVRAM_TYPE_LICENSE, 2, EFX_NVRAM_LICENSE}
+};
+
+ __checkReturn efx_rc_t
+siena_nvram_type_to_partn(
+ __in efx_nic_t *enp,
+ __in efx_nvram_type_t type,
+ __out uint32_t *partnp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ unsigned int i;
+
+ EFSYS_ASSERT3U(type, <, EFX_NVRAM_NTYPES);
+ EFSYS_ASSERT(partnp != NULL);
+
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_parttbl_entry_t *entry = &siena_parttbl[i];
+
+ if (entry->port == emip->emi_port && entry->nvtype == type) {
+ *partnp = entry->partn;
+ return (0);
+ }
+ }
+
+ return (ENOTSUP);
+}
+
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_nvram_test(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_parttbl_entry_t *entry;
+ unsigned int i;
+ efx_rc_t rc;
+
+ /*
+ * Iterate over the list of supported partition types
+ * applicable to *this* port
+ */
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ entry = &siena_parttbl[i];
+
+ if (entry->port != emip->emi_port ||
+ !(enp->en_u.siena.enu_partn_mask & (1 << entry->partn)))
+ continue;
+
+ if ((rc = efx_mcdi_nvram_test(enp, entry->partn)) != 0) {
+ goto fail1;
+ }
+ }
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+
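+/*
+ * Size of a dynamic config header carrying _nitems firmware version
+ * entries: the fixed header plus one fw_version slot per item.
+ */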
+#define SIENA_DYNAMIC_CFG_SIZE(_nitems) \
+ (sizeof (siena_mc_dynamic_config_hdr_t) + ((_nitems) * \
+ sizeof (((siena_mc_dynamic_config_hdr_t *)NULL)->fw_version[0])))
+
+ __checkReturn efx_rc_t
+siena_nvram_get_dynamic_cfg(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in boolean_t vpd,
+ __out siena_mc_dynamic_config_hdr_t **dcfgp,
+ __out size_t *sizep)
+{
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int nversions;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1);
+
+ /*
+	 * Allocate sufficient memory for the entire dynamic config area, even
+ * if we're not actually going to read in the VPD.
+ */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, dcfg);
+ if (dcfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)dcfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic */
+ if (EFX_DWORD_FIELD(dcfg->magic, EFX_DWORD_0)
+ != SIENA_MC_DYNAMIC_CONFIG_MAGIC)
+ goto invalid1;
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_DYNAMIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nversions = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the partn size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size)
+ goto invalid2;
+
+	/* Verify the header has room for all its versions */
+ if (hdr_length < SIENA_DYNAMIC_CFG_SIZE(0) ||
+ hdr_length < SIENA_DYNAMIC_CFG_SIZE(nversions))
+ goto invalid3;
+
+ /*
+ * Read the remaining portion of the dcfg, either including
+ * the whole of VPD (there is no vpd length in this structure,
+ * so we have to parse each tag), or just the dcfg header itself
+ */
+ region = vpd ? vpd_offset + vpd_length : hdr_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)dcfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail4;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ if (cksum != 0)
+ goto invalid4;
+
+ goto done;
+
+invalid4:
+ EFSYS_PROBE(invalid4);
+invalid3:
+ EFSYS_PROBE(invalid3);
+invalid2:
+ EFSYS_PROBE(invalid2);
+invalid1:
+ EFSYS_PROBE(invalid1);
+
+ /*
+ * Construct a new "null" dcfg, with an empty version vector,
+ * and an empty VPD chunk trailing. This has the neat side effect
+ * of testing the exception paths in the write path.
+ */
+ EFX_POPULATE_DWORD_1(dcfg->magic,
+ EFX_DWORD_0, SIENA_MC_DYNAMIC_CONFIG_MAGIC);
+ EFX_POPULATE_WORD_1(dcfg->length, EFX_WORD_0, sizeof (*dcfg));
+ EFX_POPULATE_BYTE_1(dcfg->version, EFX_BYTE_0,
+ SIENA_MC_DYNAMIC_CONFIG_VERSION);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, sizeof (*dcfg));
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, 0);
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items, EFX_DWORD_0, 0);
+
+done:
+ *dcfgp = dcfg;
+ *sizep = size;
+
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, dcfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_get_subtype(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_BOARD_CFG_IN_LEN,
+ MC_CMD_GET_BOARD_CFG_OUT_LENMAX)];
+ efx_word_t *fw_list;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_BOARD_CFG;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (req.emr_out_length_used <
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST +
+ (partn + 1) * sizeof (efx_word_t)) {
+ rc = ENOENT;
+ goto fail3;
+ }
+
+ fw_list = MCDI_OUT2(req, efx_word_t,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ *subtypep = EFX_WORD_FIELD(fw_list[partn], EFX_WORD_0);
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_get_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out uint32_t *subtypep,
+ __out_ecount(4) uint16_t version[4])
+{
+ siena_mc_dynamic_config_hdr_t *dcfg;
+ siena_parttbl_entry_t *entry;
+ uint32_t dcfg_partn;
+ unsigned int i;
+ efx_rc_t rc;
+
+ if ((1 << partn) & ~enp->en_u.siena.enu_partn_mask) {
+ rc = ENOTSUP;
+ goto fail1;
+ }
+
+ if ((rc = siena_nvram_get_subtype(enp, partn, subtypep)) != 0)
+ goto fail2;
+
+ /*
+	 * Some partitions are accessible from both ports (for instance BOOTROM).
+ * Find the highest version reported by all dcfg structures on ports
+ * that have access to this partition.
+ */
+ version[0] = version[1] = version[2] = version[3] = 0;
+ for (i = 0; i < EFX_ARRAY_SIZE(siena_parttbl); i++) {
+ siena_mc_fw_version_t *verp;
+ unsigned int nitems;
+ uint16_t temp[4];
+ size_t length;
+
+ entry = &siena_parttbl[i];
+ if (entry->partn != partn)
+ continue;
+
+ dcfg_partn = (entry->port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+ /*
+	 * Ignore missing partitions on port 2, assuming they're due
+	 * to running on a single-port part.
+ */
+ if ((1 << dcfg_partn) & ~enp->en_u.siena.enu_partn_mask) {
+ if (entry->port == 2)
+ continue;
+ }
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items,
+ EFX_DWORD_0);
+ if (nitems < entry->partn)
+ goto done;
+
+ verp = &dcfg->fw_version[partn];
+ temp[0] = EFX_WORD_FIELD(verp->version_w, EFX_WORD_0);
+ temp[1] = EFX_WORD_FIELD(verp->version_x, EFX_WORD_0);
+ temp[2] = EFX_WORD_FIELD(verp->version_y, EFX_WORD_0);
+ temp[3] = EFX_WORD_FIELD(verp->version_z, EFX_WORD_0);
+ if (memcmp(version, temp, sizeof (temp)) < 0)
+ memcpy(version, temp, sizeof (temp));
+
+done:
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_start(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __out size_t *chunk_sizep)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_lock(enp, partn)) != 0)
+ goto fail1;
+
+ if (chunk_sizep != NULL)
+ *chunk_sizep = SIENA_NVRAM_CHUNK;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_rw_finish(
+ __in efx_nic_t *enp,
+ __in uint32_t partn)
+{
+ efx_rc_t rc;
+
+ if ((rc = siena_nvram_partn_unlock(enp, partn)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_nvram_partn_set_version(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __in_ecount(4) uint16_t version[4])
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ siena_mc_fw_version_t *fwverp;
+ uint32_t dcfg_partn;
+ size_t dcfg_size;
+ unsigned int hdr_length;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int nitems;
+ unsigned int required_hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ uint32_t subtype;
+ size_t length;
+ efx_rc_t rc;
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &dcfg_size)) != 0)
+ goto fail1;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &length)) != 0)
+ goto fail3;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+ nitems = EFX_DWORD_FIELD(dcfg->num_fw_version_items, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ /*
+	 * NOTE: This function will overwrite any fields trailing the version
+ * vector, or the VPD chunk.
+ */
+ required_hdr_length = SIENA_DYNAMIC_CFG_SIZE(partn + 1);
+ if (required_hdr_length + vpd_length > length) {
+ rc = ENOSPC;
+ goto fail4;
+ }
+
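+	/*
+	 * If the header needs to grow to hold this version entry, shuffle the
+	 * trailing VPD chunk up to make room before extending the header.
+	 */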
+ if (vpd_offset < required_hdr_length) {
+ (void) memmove((caddr_t)dcfg + required_hdr_length,
+ (caddr_t)dcfg + vpd_offset, vpd_length);
+ vpd_offset = required_hdr_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset,
+ EFX_DWORD_0, vpd_offset);
+ }
+
+ if (hdr_length < required_hdr_length) {
+ (void) memset((caddr_t)dcfg + hdr_length, 0,
+ required_hdr_length - hdr_length);
+ hdr_length = required_hdr_length;
+ EFX_POPULATE_WORD_1(dcfg->length,
+ EFX_WORD_0, hdr_length);
+ }
+
+ /* Get the subtype to insert into the fw_subtype array */
+ if ((rc = siena_nvram_get_subtype(enp, partn, &subtype)) != 0)
+ goto fail5;
+
+ /* Fill out the new version */
+ fwverp = &dcfg->fw_version[partn];
+ EFX_POPULATE_DWORD_1(fwverp->fw_subtype, EFX_DWORD_0, subtype);
+ EFX_POPULATE_WORD_1(fwverp->version_w, EFX_WORD_0, version[0]);
+ EFX_POPULATE_WORD_1(fwverp->version_x, EFX_WORD_0, version[1]);
+ EFX_POPULATE_WORD_1(fwverp->version_y, EFX_WORD_0, version[2]);
+ EFX_POPULATE_WORD_1(fwverp->version_z, EFX_WORD_0, version[3]);
+
+ /* Update the version count */
+ if (nitems < partn + 1) {
+ nitems = partn + 1;
+ EFX_POPULATE_DWORD_1(dcfg->num_fw_version_items,
+ EFX_DWORD_0, nitems);
+ }
+
+ /* Update the checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new partition */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, dcfg_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0,
+ (caddr_t)dcfg, vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ EFSYS_KMEM_FREE(enp->en_esip, length, dcfg);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_NVRAM */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_phy.c b/drivers/net/sfc/base/siena_phy.c
new file mode 100644
index 00000000..b90ccabc
--- /dev/null
+++ b/drivers/net/sfc/base/siena_phy.c
@@ -0,0 +1,797 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
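+/*
+ * Translate an MCDI PHY capability mask (MC_CMD_PHY_CAP_*_LBN bit
+ * positions) into the corresponding EFX_PHY_CAP_* bitmask.
+ */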
+static void
+siena_phy_decode_cap(
+ __in uint32_t mcdi_cap,
+ __out uint32_t *maskp)
+{
+ uint32_t mask;
+
+ mask = 0;
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_100FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000HDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_1000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ mask |= (1 << EFX_PHY_CAP_10000FDX);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+ mask |= (1 << EFX_PHY_CAP_PAUSE);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ mask |= (1 << EFX_PHY_CAP_ASYM);
+ if (mcdi_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mask |= (1 << EFX_PHY_CAP_AN);
+
+ *maskp = mask;
+}
+
+static void
+siena_phy_decode_link_mode(
+ __in efx_nic_t *enp,
+ __in uint32_t link_flags,
+ __in unsigned int speed,
+ __in unsigned int fcntl,
+ __out efx_link_mode_t *link_modep,
+ __out unsigned int *fcntlp)
+{
+ boolean_t fd = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ boolean_t up = !!(link_flags &
+ (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+
+ _NOTE(ARGUNUSED(enp))
+
+ if (!up)
+ *link_modep = EFX_LINK_DOWN;
+ else if (speed == 10000 && fd)
+ *link_modep = EFX_LINK_10000FDX;
+ else if (speed == 1000)
+ *link_modep = fd ? EFX_LINK_1000FDX : EFX_LINK_1000HDX;
+ else if (speed == 100)
+ *link_modep = fd ? EFX_LINK_100FDX : EFX_LINK_100HDX;
+ else if (speed == 10)
+ *link_modep = fd ? EFX_LINK_10FDX : EFX_LINK_10HDX;
+ else
+ *link_modep = EFX_LINK_UNKNOWN;
+
+ if (fcntl == MC_CMD_FCNTL_OFF)
+ *fcntlp = 0;
+ else if (fcntl == MC_CMD_FCNTL_RESPOND)
+ *fcntlp = EFX_FCNTL_RESPOND;
+ else if (fcntl == MC_CMD_FCNTL_BIDIR)
+ *fcntlp = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ else {
+ EFSYS_PROBE1(mc_pcol_error, int, fcntl);
+ *fcntlp = 0;
+ }
+}
+
+ void
+siena_phy_link_ev(
+ __in efx_nic_t *enp,
+ __in efx_qword_t *eqp,
+ __out efx_link_mode_t *link_modep)
+{
+ efx_port_t *epp = &(enp->en_port);
+ unsigned int link_flags;
+ unsigned int speed;
+ unsigned int fcntl;
+ efx_link_mode_t link_mode;
+ uint32_t lp_cap_mask;
+
+ /*
+ * Convert the LINKCHANGE speed enumeration into mbit/s, in the
+ * same way as GET_LINK encodes the speed
+ */
+ switch (MCDI_EV_FIELD(eqp, LINKCHANGE_SPEED)) {
+ case MCDI_EVENT_LINKCHANGE_SPEED_100M:
+ speed = 100;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_1G:
+ speed = 1000;
+ break;
+ case MCDI_EVENT_LINKCHANGE_SPEED_10G:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ break;
+ }
+
+ link_flags = MCDI_EV_FIELD(eqp, LINKCHANGE_LINK_FLAGS);
+ siena_phy_decode_link_mode(enp, link_flags, speed,
+ MCDI_EV_FIELD(eqp, LINKCHANGE_FCNTL),
+ &link_mode, &fcntl);
+ siena_phy_decode_cap(MCDI_EV_FIELD(eqp, LINKCHANGE_LP_CAP),
+ &lp_cap_mask);
+
+ /*
+ * It's safe to update ep_lp_cap_mask without the driver's port lock
+ * because presumably any concurrently running efx_port_poll() is
+ * only going to arrive at the same value.
+ *
+ * ep_fcntl has two meanings. It's either the link common fcntl
+ * (if the PHY supports AN), or it's the forced link state. If
+ * the former, it's safe to update the value for the same reason as
+ * for ep_lp_cap_mask. If the latter, then just ignore the value,
+ * because we can race with efx_mac_fcntl_set().
+ */
+ epp->ep_lp_cap_mask = lp_cap_mask;
+ if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_AN))
+ epp->ep_fcntl = fcntl;
+
+ *link_modep = link_mode;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_power(
+ __in efx_nic_t *enp,
+ __in boolean_t power)
+{
+ efx_rc_t rc;
+
+ if (!power)
+ return (0);
+
+ /* Check if the PHY is a zombie */
+ if ((rc = siena_phy_verify(enp)) != 0)
+ goto fail1;
+
+ enp->en_reset_flags |= EFX_RESET_PHY;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_get_link(
+ __in efx_nic_t *enp,
+ __out siena_link_state_t *slsp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_LINK_IN_LEN,
+ MC_CMD_GET_LINK_OUT_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_LINK_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_LINK_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_CAP),
+ &slsp->sls_adv_cap_mask);
+ siena_phy_decode_cap(MCDI_OUT_DWORD(req, GET_LINK_OUT_LP_CAP),
+ &slsp->sls_lp_cap_mask);
+
+ siena_phy_decode_link_mode(enp, MCDI_OUT_DWORD(req, GET_LINK_OUT_FLAGS),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_LINK_SPEED),
+ MCDI_OUT_DWORD(req, GET_LINK_OUT_FCNTL),
+ &slsp->sls_link_mode, &slsp->sls_fcntl);
+
+#if EFSYS_OPT_LOOPBACK
+	/* Assert that the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_NONE == EFX_LOOPBACK_OFF);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_DATA == EFX_LOOPBACK_DATA);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMAC == EFX_LOOPBACK_GMAC);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGMII == EFX_LOOPBACK_XGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGXS == EFX_LOOPBACK_XGXS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI == EFX_LOOPBACK_XAUI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII == EFX_LOOPBACK_GMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII == EFX_LOOPBACK_SGMII);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XGBR == EFX_LOOPBACK_XGBR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI == EFX_LOOPBACK_XFI);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XAUI_FAR == EFX_LOOPBACK_XAUI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GMII_FAR == EFX_LOOPBACK_GMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_SGMII_FAR == EFX_LOOPBACK_SGMII_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_XFI_FAR == EFX_LOOPBACK_XFI_FAR);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_GPHY == EFX_LOOPBACK_GPHY);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PHYXS == EFX_LOOPBACK_PHY_XS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PCS == EFX_LOOPBACK_PCS);
+ EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_PMAPMD == EFX_LOOPBACK_PMA_PMD);
+
+ slsp->sls_loopback = MCDI_OUT_DWORD(req, GET_LINK_OUT_LOOPBACK_MODE);
+#endif /* EFSYS_OPT_LOOPBACK */
+
+ slsp->sls_mac_up = MCDI_OUT_DWORD(req, GET_LINK_OUT_MAC_FAULT) == 0;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_reconfigure(
+ __in efx_nic_t *enp)
+{
+ efx_port_t *epp = &(enp->en_port);
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MAX(MC_CMD_SET_ID_LED_IN_LEN,
+ MC_CMD_SET_ID_LED_OUT_LEN),
+ MAX(MC_CMD_SET_LINK_IN_LEN,
+ MC_CMD_SET_LINK_OUT_LEN))];
+ uint32_t cap_mask;
+ unsigned int led_mode;
+ unsigned int speed;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_LINK;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_LINK_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_LINK_OUT_LEN;
+
+ cap_mask = epp->ep_adv_cap_mask;
+ MCDI_IN_POPULATE_DWORD_10(req, SET_LINK_IN_CAP,
+ PHY_CAP_10HDX, (cap_mask >> EFX_PHY_CAP_10HDX) & 0x1,
+ PHY_CAP_10FDX, (cap_mask >> EFX_PHY_CAP_10FDX) & 0x1,
+ PHY_CAP_100HDX, (cap_mask >> EFX_PHY_CAP_100HDX) & 0x1,
+ PHY_CAP_100FDX, (cap_mask >> EFX_PHY_CAP_100FDX) & 0x1,
+ PHY_CAP_1000HDX, (cap_mask >> EFX_PHY_CAP_1000HDX) & 0x1,
+ PHY_CAP_1000FDX, (cap_mask >> EFX_PHY_CAP_1000FDX) & 0x1,
+ PHY_CAP_10000FDX, (cap_mask >> EFX_PHY_CAP_10000FDX) & 0x1,
+ PHY_CAP_PAUSE, (cap_mask >> EFX_PHY_CAP_PAUSE) & 0x1,
+ PHY_CAP_ASYM, (cap_mask >> EFX_PHY_CAP_ASYM) & 0x1,
+ PHY_CAP_AN, (cap_mask >> EFX_PHY_CAP_AN) & 0x1);
+
+#if EFSYS_OPT_LOOPBACK
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE,
+ epp->ep_loopback_type);
+ switch (epp->ep_loopback_link_mode) {
+ case EFX_LINK_100FDX:
+ speed = 100;
+ break;
+ case EFX_LINK_1000FDX:
+ speed = 1000;
+ break;
+ case EFX_LINK_10000FDX:
+ speed = 10000;
+ break;
+ default:
+ speed = 0;
+ }
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_MODE, MC_CMD_LOOPBACK_NONE);
+ speed = 0;
+#endif /* EFSYS_OPT_LOOPBACK */
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_LOOPBACK_SPEED, speed);
+
+#if EFSYS_OPT_PHY_FLAGS
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, epp->ep_phy_flags);
+#else
+ MCDI_IN_SET_DWORD(req, SET_LINK_IN_FLAGS, 0);
+#endif /* EFSYS_OPT_PHY_FLAGS */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ /* And set the blink mode */
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_SET_ID_LED;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_SET_ID_LED_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_SET_ID_LED_OUT_LEN;
+
+#if EFSYS_OPT_PHY_LED_CONTROL
+ switch (epp->ep_phy_led_mode) {
+ case EFX_PHY_LED_DEFAULT:
+ led_mode = MC_CMD_LED_DEFAULT;
+ break;
+ case EFX_PHY_LED_OFF:
+ led_mode = MC_CMD_LED_OFF;
+ break;
+ case EFX_PHY_LED_ON:
+ led_mode = MC_CMD_LED_ON;
+ break;
+ default:
+ EFSYS_ASSERT(0);
+ led_mode = MC_CMD_LED_DEFAULT;
+ }
+
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, led_mode);
+#else
+ MCDI_IN_SET_DWORD(req, SET_ID_LED_IN_STATE, MC_CMD_LED_DEFAULT);
+#endif /* EFSYS_OPT_PHY_LED_CONTROL */
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail2;
+ }
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_verify(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_GET_PHY_STATE_IN_LEN,
+ MC_CMD_GET_PHY_STATE_OUT_LEN)];
+ uint32_t state;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_GET_PHY_STATE;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_GET_PHY_STATE_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_GET_PHY_STATE_OUT_LEN;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_GET_PHY_STATE_OUT_LEN) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ state = MCDI_OUT_DWORD(req, GET_PHY_STATE_OUT_STATE);
+ if (state != MC_CMD_PHY_STATE_OK) {
+ if (state != MC_CMD_PHY_STATE_ZOMBIE)
+ EFSYS_PROBE1(mc_pcol_error, int, state);
+ rc = ENOTACTIVE;
+ goto fail3;
+ }
+
+ return (0);
+
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_phy_oui_get(
+ __in efx_nic_t *enp,
+ __out uint32_t *ouip)
+{
+ _NOTE(ARGUNUSED(enp, ouip))
+
+ return (ENOTSUP);
+}
+
+#if EFSYS_OPT_PHY_STATS
+
+#define SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ _mc_record, _efx_record) \
+ if ((_vmask) & (1ULL << (_mc_record))) { \
+ (_smask) |= (1ULL << (_efx_record)); \
+ if ((_stat) != NULL && !EFSYS_MEM_IS_NULL(_esmp)) { \
+ efx_dword_t dword; \
+ EFSYS_MEM_READD(_esmp, (_mc_record) * 4, &dword);\
+ (_stat)[_efx_record] = \
+ EFX_DWORD_FIELD(dword, EFX_DWORD_0); \
+ } \
+ }
+
+#define SIENA_SIMPLE_STAT_SET2(_vmask, _esmp, _smask, _stat, _record) \
+ SIENA_SIMPLE_STAT_SET(_vmask, _esmp, _smask, _stat, \
+ MC_CMD_ ## _record, \
+ EFX_PHY_STAT_ ## _record)
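+
+/*
+ * Illustrative sketch (not part of the driver logic): each MCDI PHY
+ * statistic is a 32bit value DMAed at offset (record * 4), so
+ *
+ *	SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ *
+ * expands roughly to
+ *
+ *	if (vmask & (1ULL << MC_CMD_OUI)) {
+ *		smask |= (1ULL << EFX_PHY_STAT_OUI);
+ *		if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ *			efx_dword_t dword;
+ *			EFSYS_MEM_READD(esmp, MC_CMD_OUI * 4, &dword);
+ *			stat[EFX_PHY_STAT_OUI] =
+ *			    EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ *		}
+ *	}
+ */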
+
+ void
+siena_phy_decode_stats(
+ __in efx_nic_t *enp,
+ __in uint32_t vmask,
+ __in_opt efsys_mem_t *esmp,
+ __out_opt uint64_t *smaskp,
+ __inout_ecount_opt(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ uint64_t smask = 0;
+
+ _NOTE(ARGUNUSED(enp))
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, OUI);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PMA_PMD_TX_FAULT);
+
+ if (vmask & (1 << MC_CMD_PMA_PMD_SIGNAL)) {
+ smask |= ((1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_A) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_B) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_C) |
+ (1ULL << EFX_PHY_STAT_PMA_PMD_SIGNAL_D));
+ if (stat != NULL && esmp != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sig;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PMA_PMD_SIGNAL,
+ &dword);
+ sig = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_A] = (sig >> 1) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_B] = (sig >> 2) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_C] = (sig >> 3) & 1;
+ stat[EFX_PHY_STAT_PMA_PMD_SIGNAL_D] = (sig >> 4) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_A,
+ EFX_PHY_STAT_SNR_A);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_B,
+ EFX_PHY_STAT_SNR_B);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_C,
+ EFX_PHY_STAT_SNR_C);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PMA_PMD_SNR_D,
+ EFX_PHY_STAT_SNR_D);
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BER);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, PCS_BLOCK_ERRORS);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_LINK_UP,
+ EFX_PHY_STAT_PHY_XS_LINK_UP);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_RX_FAULT,
+ EFX_PHY_STAT_PHY_XS_RX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_TX_FAULT,
+ EFX_PHY_STAT_PHY_XS_TX_FAULT);
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_PHYXS_ALIGN,
+ EFX_PHY_STAT_PHY_XS_ALIGN);
+
+ if (vmask & (1 << MC_CMD_PHYXS_SYNC)) {
+ smask |= ((1 << EFX_PHY_STAT_PHY_XS_SYNC_A) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_B) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_C) |
+ (1 << EFX_PHY_STAT_PHY_XS_SYNC_D));
+ if (stat != NULL && !EFSYS_MEM_IS_NULL(esmp)) {
+ efx_dword_t dword;
+ uint32_t sync;
+ EFSYS_MEM_READD(esmp, 4 * MC_CMD_PHYXS_SYNC, &dword);
+ sync = EFX_DWORD_FIELD(dword, EFX_DWORD_0);
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_A] = (sync >> 0) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_B] = (sync >> 1) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_C] = (sync >> 2) & 1;
+ stat[EFX_PHY_STAT_PHY_XS_SYNC_D] = (sync >> 3) & 1;
+ }
+ }
+
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_LINK_UP);
+ SIENA_SIMPLE_STAT_SET2(vmask, esmp, smask, stat, AN_COMPLETE);
+
+ SIENA_SIMPLE_STAT_SET(vmask, esmp, smask, stat, MC_CMD_CL22_LINK_UP,
+ EFX_PHY_STAT_CL22EXT_LINK_UP);
+
+ if (smaskp != NULL)
+ *smaskp = smask;
+}
+
+ __checkReturn efx_rc_t
+siena_phy_stats_update(
+ __in efx_nic_t *enp,
+ __in efsys_mem_t *esmp,
+ __inout_ecount(EFX_PHY_NSTATS) uint32_t *stat)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint32_t vmask = encp->enc_mcdi_phy_stat_mask;
+ uint64_t smask;
+ efx_mcdi_req_t req;
+ uint8_t payload[MAX(MC_CMD_PHY_STATS_IN_LEN,
+ MC_CMD_PHY_STATS_OUT_DMA_LEN)];
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_PHY_STATS;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_PHY_STATS_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MC_CMD_PHY_STATS_OUT_DMA_LEN;
+
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_HI,
+ EFSYS_MEM_ADDR(esmp) >> 32);
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+ EFSYS_ASSERT3U(req.emr_out_length, ==, MC_CMD_PHY_STATS_OUT_DMA_LEN);
+
+ siena_phy_decode_stats(enp, vmask, esmp, &smask, stat);
+ EFSYS_ASSERT(smask == encp->enc_phy_stat_mask);
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+	return (rc);
+}
+
+#endif /* EFSYS_OPT_PHY_STATS */
+
+#if EFSYS_OPT_BIST
+
+ __checkReturn efx_rc_t
+siena_phy_bist_start(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ efx_rc_t rc;
+
+ if ((rc = efx_mcdi_bist_start(enp, type)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+static __checkReturn unsigned long
+siena_phy_sft9001_bist_status(
+ __in uint16_t code)
+{
+ switch (code) {
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY:
+ return (EFX_PHY_CABLE_STATUS_BUSY);
+ case MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTERPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT:
+ return (EFX_PHY_CABLE_STATUS_INTRAPAIRSHORT);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN:
+ return (EFX_PHY_CABLE_STATUS_OPEN);
+ case MC_CMD_POLL_BIST_SFT9001_PAIR_OK:
+ return (EFX_PHY_CABLE_STATUS_OK);
+ default:
+ return (EFX_PHY_CABLE_STATUS_INVALID);
+ }
+}
+
+ __checkReturn efx_rc_t
+siena_phy_bist_poll(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type,
+ __out efx_bist_result_t *resultp,
+ __out_opt __drv_when(count > 0, __notnull)
+ uint32_t *value_maskp,
+ __out_ecount_opt(count) __drv_when(count > 0, __notnull)
+ unsigned long *valuesp,
+ __in size_t count)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ uint8_t payload[MAX(MC_CMD_POLL_BIST_IN_LEN,
+ MCDI_CTL_SDU_LEN_MAX)];
+ uint32_t value_mask = 0;
+ efx_mcdi_req_t req;
+ uint32_t result;
+ efx_rc_t rc;
+
+ (void) memset(payload, 0, sizeof (payload));
+ req.emr_cmd = MC_CMD_POLL_BIST;
+ req.emr_in_buf = payload;
+ req.emr_in_length = MC_CMD_POLL_BIST_IN_LEN;
+ req.emr_out_buf = payload;
+ req.emr_out_length = MCDI_CTL_SDU_LEN_MAX;
+
+ efx_mcdi_execute(enp, &req);
+
+ if (req.emr_rc != 0) {
+ rc = req.emr_rc;
+ goto fail1;
+ }
+
+ if (req.emr_out_length_used < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4) {
+ rc = EMSGSIZE;
+ goto fail2;
+ }
+
+ if (count > 0)
+ (void) memset(valuesp, '\0', count * sizeof (unsigned long));
+
+ result = MCDI_OUT_DWORD(req, POLL_BIST_OUT_RESULT);
+
+ /* Extract PHY specific results */
+ if (result == MC_CMD_POLL_BIST_PASSED &&
+ encp->enc_phy_type == EFX_PHY_SFT9001B &&
+ req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN &&
+ (type == EFX_BIST_TYPE_PHY_CABLE_SHORT ||
+ type == EFX_BIST_TYPE_PHY_CABLE_LONG)) {
+ uint16_t word;
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_A) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_A] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_B) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_B] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_C) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_C] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_LENGTH_D) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_PHY_CABLE_LENGTH_D] =
+ MCDI_OUT_DWORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D);
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_LENGTH_D);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_A) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_A);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_A] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_A);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_B) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_B);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_B] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_B);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_C) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_C);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_C] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_C);
+ }
+
+ if (count > EFX_BIST_PHY_CABLE_STATUS_D) {
+ if (valuesp != NULL) {
+ word = MCDI_OUT_WORD(req,
+ POLL_BIST_OUT_SFT9001_CABLE_STATUS_D);
+ valuesp[EFX_BIST_PHY_CABLE_STATUS_D] =
+ siena_phy_sft9001_bist_status(word);
+ }
+ value_mask |= (1 << EFX_BIST_PHY_CABLE_STATUS_D);
+ }
+
+ } else if (result == MC_CMD_POLL_BIST_FAILED &&
+ encp->enc_phy_type == EFX_PHY_QLX111V &&
+	    req.emr_out_length_used >= MC_CMD_POLL_BIST_OUT_MRSFP_LEN &&
+ count > EFX_BIST_FAULT_CODE) {
+ if (valuesp != NULL)
+ valuesp[EFX_BIST_FAULT_CODE] =
+ MCDI_OUT_DWORD(req, POLL_BIST_OUT_MRSFP_TEST);
+ value_mask |= 1 << EFX_BIST_FAULT_CODE;
+ }
+
+ if (value_maskp != NULL)
+ *value_maskp = value_mask;
+
+ EFSYS_ASSERT(resultp != NULL);
+ if (result == MC_CMD_POLL_BIST_RUNNING)
+ *resultp = EFX_BIST_RESULT_RUNNING;
+ else if (result == MC_CMD_POLL_BIST_PASSED)
+ *resultp = EFX_BIST_RESULT_PASSED;
+ else
+ *resultp = EFX_BIST_RESULT_FAILED;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_phy_bist_stop(
+ __in efx_nic_t *enp,
+ __in efx_bist_type_t type)
+{
+ /* There is no way to stop BIST on Siena */
+ _NOTE(ARGUNUSED(enp, type))
+}
+
+#endif /* EFSYS_OPT_BIST */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_sram.c b/drivers/net/sfc/base/siena_sram.c
new file mode 100644
index 00000000..572c2e9a
--- /dev/null
+++ b/drivers/net/sfc/base/siena_sram.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_SIENA
+
+ void
+siena_sram_init(
+ __in efx_nic_t *enp)
+{
+ efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
+ efx_oword_t oword;
+ uint32_t rx_base, tx_base;
+
+ EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ rx_base = encp->enc_buftbl_limit;
+ tx_base = rx_base + (encp->enc_rxq_limit *
+ EFX_RXQ_DC_NDESCS(EFX_RXQ_DC_SIZE));
+
+ /* Initialize the transmit descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, tx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_TX_DC_SIZE, EFX_TXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_TX_DC_CFG_REG, &oword);
+
+ /* Initialize the receive descriptor cache */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rx_base);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_SIZE, EFX_RXQ_DC_SIZE);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_CFG_REG, &oword);
+
+ /* Set receive descriptor pre-fetch low water mark */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_RX_DC_PF_LWM, 56);
+ EFX_BAR_WRITEO(enp, FR_AZ_RX_DC_PF_WM_REG, &oword);
+
+ /* Set the event queue to use for SRAM updates */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_UPD_EVQ_ID, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_UPD_EVQ_REG, &oword);
+}
+
+#if EFSYS_OPT_DIAG
+
+ __checkReturn efx_rc_t
+siena_sram_test(
+ __in efx_nic_t *enp,
+ __in efx_sram_pattern_fn_t func)
+{
+ efx_oword_t oword;
+ efx_qword_t qword;
+ efx_qword_t verify;
+ size_t rows;
+ unsigned int wptr;
+ unsigned int rptr;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Reconfigure into HALF buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 0);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * Move the descriptor caches up to the top of SRAM, and test
+ * all of SRAM below them. We only miss out one row here.
+ */
+ rows = SIENA_SRAM_ROWS - 1;
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_RX_DC_BASE_ADR, rows);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_RX_DC_CFG_REG, &oword);
+
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_SRM_TX_DC_BASE_ADR, rows + 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_SRM_TX_DC_CFG_REG, &oword);
+
+ /*
+ * Write the pattern through BUF_HALF_TBL. Write
+ * in 64 entry batches, waiting 1us in between each batch
+ * to guarantee not to overflow the SRAM fifo
+ */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_FALSE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_FALSE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail1;
+ }
+ }
+ }
+
+ /* And do the same negated */
+ for (wptr = 0, rptr = 0; wptr < rows; ++wptr) {
+ func(wptr, B_TRUE, &qword);
+ EFX_BAR_TBL_WRITEQ(enp, FR_AZ_BUF_HALF_TBL, wptr, &qword);
+
+ if ((wptr - rptr) < 64 && wptr < rows - 1)
+ continue;
+
+ EFSYS_SPIN(1);
+
+ for (; rptr <= wptr; ++rptr) {
+ func(rptr, B_TRUE, &qword);
+ EFX_BAR_TBL_READQ(enp, FR_AZ_BUF_HALF_TBL, rptr,
+ &verify);
+
+ if (!EFX_QWORD_IS_EQUAL(verify, qword)) {
+ rc = EFAULT;
+ goto fail2;
+ }
+ }
+ }
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ /*
+ * We don't need to reconfigure SRAM again because the API
+	 * requires efx_nic_fini() to be called after an SRAM test.
+ */
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ /* Restore back to FULL buffer table mode */
+ EFX_POPULATE_OWORD_1(oword, FRF_AZ_BUF_TBL_MODE, 1);
+ EFX_BAR_WRITEO(enp, FR_AZ_BUF_TBL_CFG_REG, &oword);
+
+ return (rc);
+}
+
+#endif /* EFSYS_OPT_DIAG */
+
+#endif /* EFSYS_OPT_SIENA */
diff --git a/drivers/net/sfc/base/siena_vpd.c b/drivers/net/sfc/base/siena_vpd.c
new file mode 100644
index 00000000..4fb2e426
--- /dev/null
+++ b/drivers/net/sfc/base/siena_vpd.c
@@ -0,0 +1,618 @@
+/*
+ * Copyright (c) 2009-2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are
+ * those of the authors and should not be interpreted as representing official
+ * policies, either expressed or implied, of the FreeBSD Project.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+#if EFSYS_OPT_VPD
+
+#if EFSYS_OPT_SIENA
+
+static __checkReturn efx_rc_t
+siena_vpd_get_static(
+ __in efx_nic_t *enp,
+ __in uint32_t partn,
+ __deref_out_bcount_opt(*sizep) caddr_t *svpdp,
+ __out size_t *sizep)
+{
+ siena_mc_static_config_hdr_t *scfg;
+ caddr_t svpd;
+ size_t size;
+ uint8_t cksum;
+ unsigned int vpd_offset;
+ unsigned int vpd_length;
+ unsigned int hdr_length;
+ unsigned int pos;
+ unsigned int region;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 ||
+ partn == MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1);
+
+ /* Allocate sufficient memory for the entire static cfg area */
+ if ((rc = siena_nvram_partn_size(enp, partn, &size)) != 0)
+ goto fail1;
+
+ EFSYS_KMEM_ALLOC(enp->en_esip, size, scfg);
+ if (scfg == NULL) {
+ rc = ENOMEM;
+ goto fail2;
+ }
+
+ if ((rc = siena_nvram_partn_read(enp, partn, 0,
+ (caddr_t)scfg, SIENA_NVRAM_CHUNK)) != 0)
+ goto fail3;
+
+ /* Verify the magic number */
+ if (EFX_DWORD_FIELD(scfg->magic, EFX_DWORD_0) !=
+ SIENA_MC_STATIC_CONFIG_MAGIC) {
+ rc = EINVAL;
+ goto fail4;
+ }
+
+ /* All future versions of the structure must be backwards compatible */
+ EFX_STATIC_ASSERT(SIENA_MC_STATIC_CONFIG_VERSION == 0);
+
+ hdr_length = EFX_WORD_FIELD(scfg->length, EFX_WORD_0);
+ vpd_offset = EFX_DWORD_FIELD(scfg->static_vpd_offset, EFX_DWORD_0);
+ vpd_length = EFX_DWORD_FIELD(scfg->static_vpd_length, EFX_DWORD_0);
+
+ /* Verify the hdr doesn't overflow the sector size */
+ if (hdr_length > size || vpd_offset > size || vpd_length > size ||
+ vpd_length + vpd_offset > size) {
+ rc = EINVAL;
+ goto fail5;
+ }
+
+ /* Read the remainder of scfg + static vpd */
+ region = vpd_offset + vpd_length;
+ if (region > SIENA_NVRAM_CHUNK) {
+ if ((rc = siena_nvram_partn_read(enp, partn, SIENA_NVRAM_CHUNK,
+ (caddr_t)scfg + SIENA_NVRAM_CHUNK,
+ region - SIENA_NVRAM_CHUNK)) != 0)
+ goto fail6;
+ }
+
+ /* Verify checksum */
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)scfg)[pos];
+ if (cksum != 0) {
+ rc = EINVAL;
+ goto fail7;
+ }
+
+ if (vpd_length == 0)
+ svpd = NULL;
+ else {
+ /* Copy the vpd data out */
+ EFSYS_KMEM_ALLOC(enp->en_esip, vpd_length, svpd);
+ if (svpd == NULL) {
+ rc = ENOMEM;
+ goto fail8;
+ }
+ memcpy(svpd, (caddr_t)scfg + vpd_offset, vpd_length);
+ }
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+ *svpdp = svpd;
+ *sizep = vpd_length;
+
+ return (0);
+
+fail8:
+ EFSYS_PROBE(fail8);
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, scfg);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_init(
+ __in efx_nic_t *enp)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ caddr_t svpd = NULL;
+ unsigned int partn;
+ size_t size = 0;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1;
+
+ /*
+ * We need the static VPD sector to present a unified static+dynamic
+ * VPD, that is, basically on every read, write, verify cycle. Since
+ * it should *never* change we can just cache it here.
+ */
+ if ((rc = siena_vpd_get_static(enp, partn, &svpd, &size)) != 0)
+ goto fail1;
+
+ if (svpd != NULL && size > 0) {
+ if ((rc = efx_vpd_hunk_verify(svpd, size, NULL)) != 0)
+ goto fail2;
+ }
+
+ enp->en_u.siena.enu_svpd = svpd;
+ enp->en_u.siena.enu_svpd_length = size;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, size, svpd);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_size(
+ __in efx_nic_t *enp,
+ __out size_t *sizep)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ uint32_t partn;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * This function returns the total size the user should allocate
+ * for all VPD operations. We've already cached the static vpd,
+ * so we just need to return an upper bound on the dynamic vpd.
+	 * Since the dynamic_config structure can change under our feet
+	 * (as version numbers are inserted), just be safe and return the
+	 * total size of the dynamic_config *sector*.
+ */
+ partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, partn, sizep)) != 0)
+ goto fail1;
+
+ return (0);
+
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_read(
+ __in efx_nic_t *enp,
+ __out_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_length;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ size_t dcfg_size;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_TRUE, &dcfg, &dcfg_size)) != 0)
+ goto fail1;
+
+ vpd_length = EFX_DWORD_FIELD(dcfg->dynamic_vpd_length, EFX_DWORD_0);
+ vpd_offset = EFX_DWORD_FIELD(dcfg->dynamic_vpd_offset, EFX_DWORD_0);
+
+ if (vpd_length > size) {
+ rc = EFAULT; /* Invalid dcfg: header bigger than sector */
+ goto fail2;
+ }
+
+ EFSYS_ASSERT3U(vpd_length, <=, size);
+ memcpy(data, (caddr_t)dcfg + vpd_offset, vpd_length);
+
+ /* Pad data with all-1s, consistent with update operations */
+ memset(data + vpd_length, 0xff, size - vpd_length);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_verify(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_vpd_tag_t stag;
+ efx_vpd_tag_t dtag;
+ efx_vpd_keyword_t skey;
+ efx_vpd_keyword_t dkey;
+ unsigned int scont;
+ unsigned int dcont;
+
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /*
+ * Strictly you could take the view that dynamic vpd is optional.
+ * Instead, to conform more closely to the read/verify/reinit()
+ * paradigm, we require dynamic vpd. siena_vpd_reinit() will
+ * reinitialize it as required.
+ */
+ if ((rc = efx_vpd_hunk_verify(data, size, NULL)) != 0)
+ goto fail1;
+
+ /*
+ * Verify that there is no duplication between the static and
+ * dynamic cfg sectors.
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ goto done;
+
+ dcont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(data, size, &dtag,
+ &dkey, NULL, NULL, &dcont)) != 0)
+ goto fail2;
+ if (dcont == 0)
+ break;
+
+ /*
+ * Skip the RV keyword. It should be present in both the static
+ * and dynamic cfg sectors.
+ */
+ if (dtag == EFX_VPD_RO && dkey == EFX_VPD_KEYWORD('R', 'V'))
+ continue;
+
+ scont = 0;
+ _NOTE(CONSTANTCONDITION)
+ while (1) {
+ if ((rc = efx_vpd_hunk_next(
+ enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, &stag, &skey,
+ NULL, NULL, &scont)) != 0)
+ goto fail3;
+ if (scont == 0)
+ break;
+
+ if (stag == dtag && skey == dkey) {
+ rc = EEXIST;
+ goto fail4;
+ }
+ }
+ }
+
+done:
+ return (0);
+
+fail4:
+ EFSYS_PROBE(fail4);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_reinit(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ boolean_t wantpid;
+ efx_rc_t rc;
+
+ /*
+	 * Only create a PID if the static VPD doesn't already have one
+ */
+ if (enp->en_u.siena.enu_svpd_length == 0)
+ wantpid = B_TRUE;
+ else {
+ unsigned int offset;
+ uint8_t length;
+
+ rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length,
+ EFX_VPD_ID, 0, &offset, &length);
+ if (rc == 0)
+ wantpid = B_FALSE;
+ else if (rc == ENOENT)
+ wantpid = B_TRUE;
+ else
+ goto fail1;
+ }
+
+ if ((rc = efx_vpd_hunk_reinit(data, size, wantpid)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_get(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __inout efx_vpd_value_t *evvp)
+{
+ unsigned int offset;
+ uint8_t length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Attempt to satisfy the request from svpd first */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value,
+ enp->en_u.siena.enu_svpd + offset, length);
+ return (0);
+ } else if (rc != ENOENT)
+ goto fail1;
+ }
+
+ /* And then from the provided data buffer */
+ if ((rc = efx_vpd_hunk_get(data, size, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) != 0) {
+ if (rc == ENOENT)
+ return (rc);
+
+ goto fail2;
+ }
+
+ evvp->evv_length = length;
+ memcpy(evvp->evv_value, data + offset, length);
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_set(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __in efx_vpd_value_t *evvp)
+{
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* If the provided (tag,keyword) exists in svpd, then it is readonly */
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ unsigned int offset;
+ uint8_t length;
+
+ if ((rc = efx_vpd_hunk_get(enp->en_u.siena.enu_svpd,
+ enp->en_u.siena.enu_svpd_length, evvp->evv_tag,
+ evvp->evv_keyword, &offset, &length)) == 0) {
+ rc = EACCES;
+ goto fail1;
+ }
+ }
+
+ if ((rc = efx_vpd_hunk_set(data, size, evvp)) != 0)
+ goto fail2;
+
+ return (0);
+
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_next(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size,
+ __out efx_vpd_value_t *evvp,
+ __inout unsigned int *contp)
+{
+ _NOTE(ARGUNUSED(enp, data, size, evvp, contp))
+
+ return (ENOTSUP);
+}
+
+ __checkReturn efx_rc_t
+siena_vpd_write(
+ __in efx_nic_t *enp,
+ __in_bcount(size) caddr_t data,
+ __in size_t size)
+{
+ efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
+ siena_mc_dynamic_config_hdr_t *dcfg = NULL;
+ unsigned int vpd_offset;
+ unsigned int dcfg_partn;
+ unsigned int hdr_length;
+ unsigned int pos;
+ uint8_t cksum;
+ size_t partn_size, dcfg_size;
+ size_t vpd_length;
+ efx_rc_t rc;
+
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ /* Determine total length of all tags */
+ if ((rc = efx_vpd_hunk_length(data, size, &vpd_length)) != 0)
+ goto fail1;
+
+ /* Lock dynamic config sector for write, and read structure only */
+ dcfg_partn = (emip->emi_port == 1)
+ ? MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0
+ : MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1;
+
+ if ((rc = siena_nvram_partn_size(enp, dcfg_partn, &partn_size)) != 0)
+ goto fail2;
+
+ if ((rc = siena_nvram_partn_lock(enp, dcfg_partn)) != 0)
+ goto fail3;
+
+ if ((rc = siena_nvram_get_dynamic_cfg(enp, dcfg_partn,
+ B_FALSE, &dcfg, &dcfg_size)) != 0)
+ goto fail4;
+
+ hdr_length = EFX_WORD_FIELD(dcfg->length, EFX_WORD_0);
+
+ /* Allocated memory should have room for the new VPD */
+ if (hdr_length + vpd_length > dcfg_size) {
+ rc = ENOSPC;
+ goto fail5;
+ }
+
+ /* Copy in new vpd and update header */
+ vpd_offset = dcfg_size - vpd_length;
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_offset, EFX_DWORD_0, vpd_offset);
+ memcpy((caddr_t)dcfg + vpd_offset, data, vpd_length);
+ EFX_POPULATE_DWORD_1(dcfg->dynamic_vpd_length, EFX_DWORD_0, vpd_length);
+
+ /* Update the checksum */
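+	/*
+	 * The first hdr_length bytes (which include the checksum byte
+	 * itself) must sum to zero modulo 256, as checked when the sector
+	 * is read back, so fold the recomputed byte sum back into the
+	 * checksum byte.
+	 */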
+ cksum = 0;
+ for (pos = 0; pos < hdr_length; pos++)
+ cksum += ((uint8_t *)dcfg)[pos];
+ dcfg->csum.eb_u8[0] -= cksum;
+
+ /* Erase and write the new sector */
+ if ((rc = siena_nvram_partn_erase(enp, dcfg_partn, 0, partn_size)) != 0)
+ goto fail6;
+
+ /* Write out the new structure to nvram */
+ if ((rc = siena_nvram_partn_write(enp, dcfg_partn, 0, (caddr_t)dcfg,
+ vpd_offset + vpd_length)) != 0)
+ goto fail7;
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+
+ return (0);
+
+fail7:
+ EFSYS_PROBE(fail7);
+fail6:
+ EFSYS_PROBE(fail6);
+fail5:
+ EFSYS_PROBE(fail5);
+
+ EFSYS_KMEM_FREE(enp->en_esip, dcfg_size, dcfg);
+fail4:
+ EFSYS_PROBE(fail4);
+
+ siena_nvram_partn_unlock(enp, dcfg_partn);
+fail3:
+ EFSYS_PROBE(fail3);
+fail2:
+ EFSYS_PROBE(fail2);
+fail1:
+ EFSYS_PROBE1(fail1, efx_rc_t, rc);
+
+ return (rc);
+}
+
+ void
+siena_vpd_fini(
+ __in efx_nic_t *enp)
+{
+ EFSYS_ASSERT(enp->en_family == EFX_FAMILY_SIENA);
+
+ if (enp->en_u.siena.enu_svpd_length > 0) {
+ EFSYS_KMEM_FREE(enp->en_esip, enp->en_u.siena.enu_svpd_length,
+ enp->en_u.siena.enu_svpd);
+
+ enp->en_u.siena.enu_svpd = NULL;
+ enp->en_u.siena.enu_svpd_length = 0;
+ }
+}
+
+#endif /* EFSYS_OPT_SIENA */
+
+#endif /* EFSYS_OPT_VPD */
diff --git a/drivers/net/sfc/efsys.h b/drivers/net/sfc/efsys.h
new file mode 100644
index 00000000..0405d02b
--- /dev/null
+++ b/drivers/net/sfc/efsys.h
@@ -0,0 +1,780 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_COMMON_EFSYS_H
+#define _SFC_COMMON_EFSYS_H
+
+#include <stdbool.h>
+
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_memzone.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_log.h>
+#include <rte_io.h>
+
+#include "sfc_debug.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define EFSYS_HAS_UINT64 1
+#define EFSYS_USE_UINT64 1
+#define EFSYS_HAS_SSE2_M128 1
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 1
+#define EFSYS_IS_LITTLE_ENDIAN 0
+#elif RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+#define EFSYS_IS_BIG_ENDIAN 0
+#define EFSYS_IS_LITTLE_ENDIAN 1
+#else
+#error "Cannot determine system endianness"
+#endif
+#include "efx_types.h"
+
+
+#ifndef _NOTE
+#define _NOTE(s)
+#endif
+
+typedef bool boolean_t;
+
+#ifndef B_FALSE
+#define B_FALSE false
+#endif
+#ifndef B_TRUE
+#define B_TRUE true
+#endif
+
+/*
+ * RTE_MAX() and RTE_MIN() cannot be used since a braced-group within
+ * an expression is allowed only inside a function, but MAX() is used
+ * as the number of elements in an array.
+ */
+#ifndef MAX
+#define MAX(v1, v2) ((v1) > (v2) ? (v1) : (v2))
+#endif
+#ifndef MIN
+#define MIN(v1, v2) ((v1) < (v2) ? (v1) : (v2))
+#endif
+
+/* There are alignment macros in DPDK, but a proper correspondence
+ * would need to be established here before they could be re-used.
+ */
+#ifndef IS_P2ALIGNED
+#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
+#endif
+
+#ifndef P2ROUNDUP
+#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
+#endif
+
+#ifndef P2ALIGN
+#define P2ALIGN(_x, _a) ((_x) & -(_a))
+#endif
+
+#ifndef ISP2
+#define ISP2(x) rte_is_power_of_2(x)
+#endif
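+
+/*
+ * Illustrative examples (not used by the code): P2ROUNDUP(10, 8) == 16,
+ * P2ALIGN(10, 8) == 8 and IS_P2ALIGNED(16, 8) is non-zero; the alignment
+ * argument must be a power of two.
+ */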
+
+#define ENOTACTIVE ENOTCONN
+
+static inline void
+prefetch_read_many(const volatile void *addr)
+{
+ rte_prefetch0(addr);
+}
+
+static inline void
+prefetch_read_once(const volatile void *addr)
+{
+ rte_prefetch_non_temporal(addr);
+}
+
+/* Modifiers used for Windows builds */
+#define __in
+#define __in_opt
+#define __in_ecount(_n)
+#define __in_ecount_opt(_n)
+#define __in_bcount(_n)
+#define __in_bcount_opt(_n)
+
+#define __out
+#define __out_opt
+#define __out_ecount(_n)
+#define __out_ecount_opt(_n)
+#define __out_bcount(_n)
+#define __out_bcount_opt(_n)
+
+#define __deref_out
+
+#define __inout
+#define __inout_opt
+#define __inout_ecount(_n)
+#define __inout_ecount_opt(_n)
+#define __inout_bcount(_n)
+#define __inout_bcount_opt(_n)
+#define __inout_bcount_full_opt(_n)
+
+#define __deref_out_bcount_opt(n)
+
+#define __checkReturn
+#define __success(_x)
+
+#define __drv_when(_p, _c)
+
+/* Code inclusion options */
+
+
+#define EFSYS_OPT_NAMES 1
+
+/* Disable SFN5xxx/SFN6xxx since they require specific support in the PMD */
+#define EFSYS_OPT_SIENA 0
+/* Enable SFN7xxx support */
+#define EFSYS_OPT_HUNTINGTON 1
+/* Enable SFN8xxx support */
+#define EFSYS_OPT_MEDFORD 1
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_OPT_CHECK_REG 1
+#else
+#define EFSYS_OPT_CHECK_REG 0
+#endif
+
+/* MCDI is required for SFN7xxx and SFN8xxx */
+#define EFSYS_OPT_MCDI 1
+#define EFSYS_OPT_MCDI_LOGGING 1
+#define EFSYS_OPT_MCDI_PROXY_AUTH 1
+
+#define EFSYS_OPT_MAC_STATS 1
+
+#define EFSYS_OPT_LOOPBACK 0
+
+#define EFSYS_OPT_MON_MCDI 0
+#define EFSYS_OPT_MON_STATS 0
+
+#define EFSYS_OPT_PHY_STATS 0
+#define EFSYS_OPT_BIST 0
+#define EFSYS_OPT_PHY_LED_CONTROL 0
+#define EFSYS_OPT_PHY_FLAGS 0
+
+#define EFSYS_OPT_VPD 0
+#define EFSYS_OPT_NVRAM 0
+#define EFSYS_OPT_BOOTCFG 0
+
+#define EFSYS_OPT_DIAG 0
+#define EFSYS_OPT_RX_SCALE 1
+#define EFSYS_OPT_QSTATS 0
+/* Filter support is required for SFN7xxx and SFN8xxx */
+#define EFSYS_OPT_FILTER 1
+#define EFSYS_OPT_RX_SCATTER 0
+
+#define EFSYS_OPT_EV_PREFETCH 0
+
+#define EFSYS_OPT_DECODE_INTR_FATAL 0
+
+#define EFSYS_OPT_LICENSING 0
+
+#define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
+
+#define EFSYS_OPT_RX_PACKED_STREAM 0
+
+/* ID */
+
+typedef struct __efsys_identifier_s efsys_identifier_t;
+
+
+#define EFSYS_PROBE(_name) \
+ do { } while (0)
+
+#define EFSYS_PROBE1(_name, _type1, _arg1) \
+ do { } while (0)
+
+#define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
+ do { } while (0)
+
+#define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3) \
+ do { } while (0)
+
+#define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4) \
+ do { } while (0)
+
+#define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5) \
+ do { } while (0)
+
+#define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6) \
+ do { } while (0)
+
+#define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
+ _type3, _arg3, _type4, _arg4, _type5, _arg5, \
+ _type6, _arg6, _type7, _arg7) \
+ do { } while (0)
+
+
+/* DMA */
+
+typedef phys_addr_t efsys_dma_addr_t;
+
+typedef struct efsys_mem_s {
+ const struct rte_memzone *esm_mz;
+ /*
+ * Ideally it should have volatile qualifier to denote that
+ * the memory may be updated by someone else. However, it adds
+ * qualifier discard warnings when the pointer or its derivative
+ * is passed to memset() or rte_mov16().
+ * So, skip the qualifier here, but make sure that it is added
+ * below in access macros.
+ */
+ void *esm_base;
+ efsys_dma_addr_t esm_addr;
+} efsys_mem_t;
+
+
+#define EFSYS_MEM_ZERO(_esmp, _size) \
+ do { \
+ (void)memset((void *)(_esmp)->esm_base, 0, (_size)); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READD(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ (_edp)->ed_u32[0] = _addr[0]; \
+ \
+ EFSYS_PROBE2(mem_readl, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ (_eqp)->eq_u64[0] = _addr[0]; \
+ \
+ EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_READO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ _addr[0] = (_edp)->ed_u32[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ _addr[0] = (_eqp)->eq_u64[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ \
+ EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_MEM_ADDR(_esmp) \
+ ((_esmp)->esm_addr)
+
+#define EFSYS_MEM_IS_NULL(_esmp) \
+ ((_esmp)->esm_base == NULL)
+
+#define EFSYS_MEM_PREFETCH(_esmp, _offset) \
+ do { \
+ volatile uint8_t *_base = (_esmp)->esm_base; \
+ \
+ rte_prefetch0(_base + (_offset)); \
+ } while (0)
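+
+/*
+ * Sketch of typical use by the common code (illustrative only; the
+ * offset below is a placeholder): the DMA area is handed to firmware by
+ * physical address and read back through the accessors above, e.g.
+ *
+ *	efx_dword_t dword;
+ *
+ *	MCDI_IN_SET_DWORD(req, PHY_STATS_IN_DMA_ADDR_LO,
+ *	    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
+ *	...
+ *	EFSYS_MEM_READD(esmp, offset, &dword);
+ */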
+
+
+/* BAR */
+
+typedef struct efsys_bar_s {
+ rte_spinlock_t esb_lock;
+ int esb_rid;
+ struct rte_pci_device *esb_dev;
+ /*
+ * Ideally it should have volatile qualifier to denote that
+ * the memory may be updated by someone else. However, it adds
+ * qualifier discard warnings when the pointer or its derivative
+ * is passed to memset() or rte_mov16().
+ * So, skip the qualifier here, but make sure that it is added
+ * below in access macros.
+ */
+ void *esb_base;
+} efsys_bar_t;
+
+#define SFC_BAR_LOCK_INIT(_esbp, _ifname) \
+ do { \
+ rte_spinlock_init(&(_esbp)->esb_lock); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#define SFC_BAR_LOCK_DESTROY(_esbp) ((void)0)
+#define SFC_BAR_LOCK(_esbp) rte_spinlock_lock(&(_esbp)->esb_lock)
+#define SFC_BAR_UNLOCK(_esbp) rte_spinlock_unlock(&(_esbp)->esb_lock)
+
+#define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_edp)->ed_u32[0] = rte_read32_relaxed(_addr); \
+ \
+ EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_rmb(); \
+ (_eqp)->eq_u64[0] = rte_read64_relaxed(_addr); \
+ \
+ EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ rte_rmb(); \
+ /* There is no rte_read128_relaxed() yet */ \
+ (_eop)->eo_u128[0] = _addr[0]; \
+ \
+ EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+
+#define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint32_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_dword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
+ uint32_t, (_edp)->ed_u32[0]); \
+ \
+ _addr = (volatile uint32_t *)(_base + (_offset)); \
+ rte_write32_relaxed((_edp)->ed_u32[0], _addr); \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile uint64_t *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_qword_t))); \
+ \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
+ uint32_t, (_eqp)->eq_u32[1], \
+ uint32_t, (_eqp)->eq_u32[0]); \
+ \
+ _addr = (volatile uint64_t *)(_base + (_offset)); \
+ rte_write64_relaxed((_eqp)->eq_u64[0], _addr); \
+ rte_wmb(); \
+ \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/*
+ * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
+ * (required by PIO hardware).
+ *
+ * Neither VFIO, nor UIO, nor NIC UIO (on FreeBSD) supports
+ * write-combined memory mapped to user-land, so just abort if used.
+ */
+#define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
+ do { \
+ rte_panic("Write-combined BAR access not supported"); \
+ } while (B_FALSE)
+
+#define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
+ do { \
+ volatile uint8_t *_base = (_esbp)->esb_base; \
+ volatile __m128i *_addr; \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ SFC_ASSERT(IS_P2ALIGNED(_offset, sizeof(efx_oword_t))); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_LOCK(_esbp); \
+ \
+ EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
+ uint32_t, (_eop)->eo_u32[3], \
+ uint32_t, (_eop)->eo_u32[2], \
+ uint32_t, (_eop)->eo_u32[1], \
+ uint32_t, (_eop)->eo_u32[0]); \
+ \
+ _addr = (volatile __m128i *)(_base + (_offset)); \
+ /* There is no rte_write128_relaxed() yet */ \
+ _addr[0] = (_eop)->eo_u128[0]; \
+ rte_wmb(); \
+ \
+ _NOTE(CONSTANTCONDITION); \
+ if (_lock) \
+ SFC_BAR_UNLOCK(_esbp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* Use the standard octo-word write for doorbell writes */
+#define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
+ do { \
+ EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* SPIN */
+
+#define EFSYS_SPIN(_us) \
+ do { \
+ rte_delay_us(_us); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_SLEEP EFSYS_SPIN
+
+/* BARRIERS */
+
+#define EFSYS_MEM_READ_BARRIER() rte_rmb()
+#define EFSYS_PIO_WRITE_BARRIER() rte_io_wmb()
+
+/* DMA SYNC */
+
+/*
+ * DPDK does not provide any DMA syncing API, and no PMD drivers
+ * have any traces of explicit DMA syncing.
+ * DMA mapping is assumed to be coherent.
+ */
+
+#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
+
+/* Just avoid store and compiler (implicit) reordering */
+#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
+
+/* TIMESTAMP */
+
+typedef uint64_t efsys_timestamp_t;
+
+#define EFSYS_TIMESTAMP(_usp) \
+ do { \
+ *(_usp) = rte_get_timer_cycles() * 1000000 / \
+ rte_get_timer_hz(); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* KMEM */
+
+#define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
+ do { \
+ (_esip) = (_esip); \
+ (_p) = rte_zmalloc("sfc", (_size), 0); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_KMEM_FREE(_esip, _size, _p) \
+ do { \
+ (void)(_esip); \
+ (void)(_size); \
+ rte_free((_p)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
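+
+/*
+ * Typical allocation pattern in the common code (illustrative sketch):
+ *
+ *	caddr_t buf;
+ *
+ *	EFSYS_KMEM_ALLOC(enp->en_esip, size, buf);
+ *	if (buf == NULL)
+ *		rc = ENOMEM;
+ *	...
+ *	EFSYS_KMEM_FREE(enp->en_esip, size, buf);
+ *
+ * The same size should be passed to free as was used to allocate;
+ * rte_free() ignores it, but other EFSYS implementations may not.
+ */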
+
+/* LOCK */
+
+typedef rte_spinlock_t efsys_lock_t;
+
+#define SFC_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
+ rte_spinlock_init((_eslp))
+#define SFC_EFSYS_LOCK_DESTROY(_eslp) ((void)0)
+#define SFC_EFSYS_LOCK(_eslp) \
+ rte_spinlock_lock((_eslp))
+#define SFC_EFSYS_UNLOCK(_eslp) \
+ rte_spinlock_unlock((_eslp))
+#define SFC_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
+ SFC_ASSERT(rte_spinlock_is_locked((_eslp)))
+
+typedef int efsys_lock_state_t;
+
+#define EFSYS_LOCK_MAGIC 0x000010c4
+
+#define EFSYS_LOCK(_lockp, _state) \
+ do { \
+ SFC_EFSYS_LOCK(_lockp); \
+ (_state) = EFSYS_LOCK_MAGIC; \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_UNLOCK(_lockp, _state) \
+ do { \
+ SFC_ASSERT((_state) == EFSYS_LOCK_MAGIC); \
+ SFC_EFSYS_UNLOCK(_lockp); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
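+
+/*
+ * Illustrative usage (lock pointer name follows common code convention):
+ *
+ *	efsys_lock_state_t state;
+ *
+ *	EFSYS_LOCK(enp->en_eslp, state);
+ *	... access shared state ...
+ *	EFSYS_UNLOCK(enp->en_eslp, state);
+ *
+ * The _state token only carries EFSYS_LOCK_MAGIC here, since a spinlock
+ * needs no saved interrupt state.
+ */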
+
+/* STAT */
+
+typedef uint64_t efsys_stat_t;
+
+#define EFSYS_STAT_INCR(_knp, _delta) \
+ do { \
+ *(_knp) += (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_DECR(_knp, _delta) \
+ do { \
+ *(_knp) -= (_delta); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET(_knp, _val) \
+ do { \
+ *(_knp) = (_val); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SET_DWORD(_knp, _valp) \
+ do { \
+ *(_knp) = rte_le_to_cpu_32((_valp)->ed_u32[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) += rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+#define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
+ do { \
+ *(_knp) -= rte_le_to_cpu_64((_valp)->eq_u64[0]); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+
+/* ERR */
+
+#if EFSYS_OPT_DECODE_INTR_FATAL
+#define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
+ do { \
+ (void)(_esip); \
+ RTE_LOG(ERR, PMD, "FATAL ERROR #%u (0x%08x%08x)\n", \
+ (_code), (_dword0), (_dword1)); \
+ _NOTE(CONSTANTCONDITION); \
+ } while (B_FALSE)
+#endif
+
+/* ASSERT */
+
+/* RTE_VERIFY from DPDK treats expressions with % operator incorrectly,
+ * so we re-implement it here
+ */
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+#define EFSYS_ASSERT(_exp) \
+ do { \
+ if (unlikely(!(_exp))) \
+ rte_panic("line %d\tassert \"%s\" failed\n", \
+ __LINE__, (#_exp)); \
+ } while (0)
+#else
+#define EFSYS_ASSERT(_exp) (void)(_exp)
+#endif
+
+#define EFSYS_ASSERT3(_x, _op, _y, _t) EFSYS_ASSERT((_t)(_x) _op (_t)(_y))
+
+#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
+#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
+#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)
+
+/* ROTATE */
+
+#define EFSYS_HAS_ROTL_DWORD 0
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_COMMON_EFSYS_H */
diff --git a/drivers/net/sfc/rte_pmd_sfc_efx_version.map b/drivers/net/sfc/rte_pmd_sfc_efx_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/drivers/net/sfc/rte_pmd_sfc_efx_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
new file mode 100644
index 00000000..4e241b22
--- /dev/null
+++ b/drivers/net/sfc/sfc.c
@@ -0,0 +1,750 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* sysconf() */
+#include <unistd.h>
+
+#include <rte_errno.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+
+
+int
+sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp)
+{
+ const struct rte_memzone *mz;
+
+ sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
+ name, id, len, socket_id);
+
+ mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
+ sysconf(_SC_PAGESIZE), socket_id);
+ if (mz == NULL) {
+ sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
+ name, (unsigned int)id, (unsigned int)len, socket_id,
+ rte_strerror(rte_errno));
+ return ENOMEM;
+ }
+
+ esmp->esm_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
+ if (esmp->esm_addr == RTE_BAD_PHYS_ADDR) {
+ (void)rte_memzone_free(mz);
+ return EFAULT;
+ }
+
+ esmp->esm_mz = mz;
+ esmp->esm_base = mz->addr;
+
+ return 0;
+}
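+
+/*
+ * A hypothetical call (the zone name and length below are illustrative
+ * only, not taken from the driver):
+ *
+ *	rc = sfc_dma_alloc(sa, "mcdi", 0, 8192, sa->socket_id, &sa->mcdi.mem);
+ *
+ * reserves an 8192-byte DMA-mapped memzone on the given socket and fills
+ * in the efsys_mem_t with its bus address and virtual base.
+ */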
+
+void
+sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
+{
+ int rc;
+
+ sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
+
+ rc = rte_memzone_free(esmp->esm_mz);
+ if (rc != 0)
+ sfc_err(sa, "rte_memzone_free() failed: %d", rc);
+
+ memset(esmp, 0, sizeof(*esmp));
+}
+
+static uint32_t
+sfc_phy_cap_from_link_speeds(uint32_t speeds)
+{
+ uint32_t phy_caps = 0;
+
+ if (~speeds & ETH_LINK_SPEED_FIXED) {
+ phy_caps |= (1 << EFX_PHY_CAP_AN);
+ /*
+ * If no speeds are specified in the mask, any supported
+ * may be negotiated
+ */
+ if (speeds == ETH_LINK_SPEED_AUTONEG)
+ phy_caps |=
+ (1 << EFX_PHY_CAP_1000FDX) |
+ (1 << EFX_PHY_CAP_10000FDX) |
+ (1 << EFX_PHY_CAP_40000FDX);
+ }
+ if (speeds & ETH_LINK_SPEED_1G)
+ phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
+ if (speeds & ETH_LINK_SPEED_10G)
+ phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
+ if (speeds & ETH_LINK_SPEED_40G)
+ phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
+
+ return phy_caps;
+}
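+
+/*
+ * A worked example of the mapping above (illustrative only): a
+ * link_speeds mask of (ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G) with
+ * auto-negotiation (ETH_LINK_SPEED_FIXED not set) maps to
+ * (1 << EFX_PHY_CAP_AN) | (1 << EFX_PHY_CAP_10000FDX) |
+ * (1 << EFX_PHY_CAP_40000FDX).
+ */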
+
+/*
+ * Check the requested device-level configuration.
+ * Receive and transmit configuration is checked in the corresponding
+ * modules.
+ */
+static int
+sfc_check_conf(struct sfc_adapter *sa)
+{
+ const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
+ int rc = 0;
+
+ sa->port.phy_adv_cap =
+ sfc_phy_cap_from_link_speeds(conf->link_speeds) &
+ sa->port.phy_adv_cap_mask;
+ if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
+ sfc_err(sa, "No link speeds from mask %#x are supported",
+ conf->link_speeds);
+ rc = EINVAL;
+ }
+
+ if (conf->lpbk_mode != 0) {
+ sfc_err(sa, "Loopback not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->dcb_capability_en != 0) {
+ sfc_err(sa, "Priority-based flow control not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ sfc_err(sa, "Flow Director not supported");
+ rc = EINVAL;
+ }
+
+ if ((conf->intr_conf.lsc != 0) &&
+ (sa->intr.type != EFX_INTR_LINE) &&
+ (sa->intr.type != EFX_INTR_MESSAGE)) {
+ sfc_err(sa, "Link status change interrupt not supported");
+ rc = EINVAL;
+ }
+
+ if (conf->intr_conf.rxq != 0) {
+ sfc_err(sa, "Receive queue interrupt not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/*
+ * Find out maximum number of receive and transmit queues which could be
+ * advertised.
+ *
+ * The NIC is kept initialized on success to allow other modules to
+ * acquire defaults and capabilities.
+ */
+static int
+sfc_estimate_resource_limits(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ efx_drv_limits_t limits;
+ int rc;
+ uint32_t evq_allocated;
+ uint32_t rxq_allocated;
+ uint32_t txq_allocated;
+
+ memset(&limits, 0, sizeof(limits));
+
+ /* Request at least one Rx and Tx queue */
+ limits.edl_min_rxq_count = 1;
+ limits.edl_min_txq_count = 1;
+ /* Management event queue plus event queue for each Tx and Rx queue */
+ limits.edl_min_evq_count =
+ 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
+
+ /* Divide by the number of functions to guarantee that all functions
+ * get the promised resources
+ */
+ /* FIXME Divide by number of functions (not 2) below */
+ limits.edl_max_evq_count = encp->enc_evq_limit / 2;
+ SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
+
+ /* Split equally between receive and transmit */
+ limits.edl_max_rxq_count =
+ MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
+ SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
+
+ limits.edl_max_txq_count =
+ MIN(encp->enc_txq_limit,
+ limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
+
+ if (sa->tso)
+ limits.edl_max_txq_count =
+ MIN(limits.edl_max_txq_count,
+ encp->enc_fw_assisted_tso_v2_n_contexts /
+ encp->enc_hw_pf_count);
+
+ SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
+
+ /* Configure the minimum resources required for the driver to
+ * operate and the maximum desired resources that the driver is
+ * capable of using.
+ */
+ efx_nic_set_drv_limits(sa->nic, &limits);
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ /* Find resource dimensions assigned by firmware to this function */
+ rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
+ &txq_allocated);
+ if (rc != 0)
+ goto fail_get_vi_pool;
+
+ /* The firmware may still allocate more than the maximum; enforce the limits */
+ evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
+ rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
+ txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
+
+ /* Subtract management EVQ not used for traffic */
+ SFC_ASSERT(evq_allocated > 0);
+ evq_allocated--;
+
+ /* Right now we use separate EVQ for Rx and Tx */
+ sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
+ sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
+
+ /* Keep NIC initialized */
+ return 0;
+
+fail_get_vi_pool:
+fail_nic_init:
+ efx_nic_fini(sa->nic);
+ return rc;
+}
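+
+/*
+ * A worked example of the estimation above with hypothetical limits
+ * (enc_evq_limit = 64, enc_rxq_limit = 32, enc_txq_limit = 32, no TSO):
+ * edl_max_evq_count = 64 / 2 = 32,
+ * edl_max_rxq_count = MIN(32, (32 - 1) / 2) = 15,
+ * edl_max_txq_count = MIN(32, 32 - 1 - 15) = 16.
+ * If the firmware then grants the full request, one EvQ is reserved for
+ * management, so rxq_max = MIN(15, 31 / 2) = 15 and
+ * txq_max = MIN(16, 31 - 15) = 16.
+ */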
+
+static int
+sfc_set_drv_limits(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *data = sa->eth_dev->data;
+ efx_drv_limits_t lim;
+
+ memset(&lim, 0, sizeof(lim));
+
+ /* Limits are strict since they take the initial estimation into account */
+ lim.edl_min_evq_count = lim.edl_max_evq_count =
+ 1 + data->nb_rx_queues + data->nb_tx_queues;
+ lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
+ lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
+
+ return efx_nic_set_drv_limits(sa->nic, &lim);
+}
+
+int
+sfc_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ break;
+ case SFC_ADAPTER_STARTED:
+ sfc_info(sa, "already started");
+ return 0;
+ default:
+ rc = EINVAL;
+ goto fail_bad_state;
+ }
+
+ sa->state = SFC_ADAPTER_STARTING;
+
+ sfc_log_init(sa, "set resource limits");
+ rc = sfc_set_drv_limits(sa);
+ if (rc != 0)
+ goto fail_set_drv_limits;
+
+ sfc_log_init(sa, "init nic");
+ rc = efx_nic_init(sa->nic);
+ if (rc != 0)
+ goto fail_nic_init;
+
+ rc = sfc_intr_start(sa);
+ if (rc != 0)
+ goto fail_intr_start;
+
+ rc = sfc_ev_start(sa);
+ if (rc != 0)
+ goto fail_ev_start;
+
+ rc = sfc_port_start(sa);
+ if (rc != 0)
+ goto fail_port_start;
+
+ rc = sfc_rx_start(sa);
+ if (rc != 0)
+ goto fail_rx_start;
+
+ rc = sfc_tx_start(sa);
+ if (rc != 0)
+ goto fail_tx_start;
+
+ rc = sfc_flow_start(sa);
+ if (rc != 0)
+ goto fail_flows_insert;
+
+ sa->state = SFC_ADAPTER_STARTED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_flows_insert:
+ sfc_tx_stop(sa);
+
+fail_tx_start:
+ sfc_rx_stop(sa);
+
+fail_rx_start:
+ sfc_port_stop(sa);
+
+fail_port_start:
+ sfc_ev_stop(sa);
+
+fail_ev_start:
+ sfc_intr_stop(sa);
+
+fail_intr_start:
+ efx_nic_fini(sa->nic);
+
+fail_nic_init:
+fail_set_drv_limits:
+ sa->state = SFC_ADAPTER_CONFIGURED;
+fail_bad_state:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ break;
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_info(sa, "already stopped");
+ return;
+ default:
+ sfc_err(sa, "stop in unexpected state %u", sa->state);
+ SFC_ASSERT(B_FALSE);
+ return;
+ }
+
+ sa->state = SFC_ADAPTER_STOPPING;
+
+ sfc_flow_stop(sa);
+ sfc_tx_stop(sa);
+ sfc_rx_stop(sa);
+ sfc_port_stop(sa);
+ sfc_ev_stop(sa);
+ sfc_intr_stop(sa);
+ efx_nic_fini(sa->nic);
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_configure(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
+ sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CONFIGURING;
+
+ rc = sfc_check_conf(sa);
+ if (rc != 0)
+ goto fail_check_conf;
+
+ rc = sfc_intr_configure(sa);
+ if (rc != 0)
+ goto fail_intr_configure;
+
+ rc = sfc_port_configure(sa);
+ if (rc != 0)
+ goto fail_port_configure;
+
+ rc = sfc_rx_configure(sa);
+ if (rc != 0)
+ goto fail_rx_configure;
+
+ rc = sfc_tx_configure(sa);
+ if (rc != 0)
+ goto fail_tx_configure;
+
+ sa->state = SFC_ADAPTER_CONFIGURED;
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_tx_configure:
+ sfc_rx_close(sa);
+
+fail_rx_configure:
+ sfc_port_close(sa);
+
+fail_port_configure:
+ sfc_intr_close(sa);
+
+fail_intr_configure:
+fail_check_conf:
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ sa->state = SFC_ADAPTER_CLOSING;
+
+ sfc_tx_close(sa);
+ sfc_rx_close(sa);
+ sfc_port_close(sa);
+ sfc_intr_close(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_mem_bar_init(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = sa->eth_dev;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(eth_dev);
+ efsys_bar_t *ebp = &sa->mem_bar;
+ unsigned int i;
+ struct rte_mem_resource *res;
+
+ for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
+ res = &pci_dev->mem_resource[i];
+ if ((res->len != 0) && (res->phys_addr != 0)) {
+ /* Found first memory BAR */
+ SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
+ ebp->esb_rid = i;
+ ebp->esb_dev = pci_dev;
+ ebp->esb_base = res->addr;
+ return 0;
+ }
+ }
+
+ return EFAULT;
+}
+
+static void
+sfc_mem_bar_fini(struct sfc_adapter *sa)
+{
+ efsys_bar_t *ebp = &sa->mem_bar;
+
+ SFC_BAR_LOCK_DESTROY(ebp);
+ memset(ebp, 0, sizeof(*ebp));
+}
+
+#if EFSYS_OPT_RX_SCALE
+/*
+ * A fixed RSS key which has the property of being symmetric
+ * (symmetric flows are distributed to the same CPU)
+ * and is also known to give a uniform distribution
+ * (a good spread of traffic between different CPUs)
+ */
+static const uint8_t default_rss_key[SFC_RSS_KEY_SIZE] = {
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+ 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
+};
+#endif
+
+static int
+sfc_set_rss_defaults(struct sfc_adapter *sa)
+{
+#if EFSYS_OPT_RX_SCALE
+ int rc;
+
+ rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = efx_rx_scale_support_get(sa->nic, &sa->rss_support);
+ if (rc != 0)
+ goto fail_scale_support_get;
+
+ rc = efx_rx_hash_support_get(sa->nic, &sa->hash_support);
+ if (rc != 0)
+ goto fail_hash_support_get;
+
+ efx_rx_fini(sa->nic);
+ efx_ev_fini(sa->nic);
+ efx_intr_fini(sa->nic);
+
+ sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
+
+ rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
+
+ return 0;
+
+fail_hash_support_get:
+fail_scale_support_get:
+fail_rx_init:
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ return rc;
+#else
+ return 0;
+#endif
+}
+
+int
+sfc_attach(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp;
+ efx_nic_t *enp = sa->nic;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ efx_mcdi_new_epoch(enp);
+
+ sfc_log_init(sa, "reset nic");
+ rc = efx_nic_reset(enp);
+ if (rc != 0)
+ goto fail_nic_reset;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
+ sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
+ if (!sa->tso)
+ sfc_warn(sa,
+ "TSO support isn't available on this adapter");
+ }
+
+ sfc_log_init(sa, "estimate resource limits");
+ rc = sfc_estimate_resource_limits(sa);
+ if (rc != 0)
+ goto fail_estimate_rsrc_limits;
+
+ sa->txq_max_entries = encp->enc_txq_max_ndescs;
+ SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
+
+ rc = sfc_intr_attach(sa);
+ if (rc != 0)
+ goto fail_intr_attach;
+
+ rc = sfc_ev_attach(sa);
+ if (rc != 0)
+ goto fail_ev_attach;
+
+ rc = sfc_port_attach(sa);
+ if (rc != 0)
+ goto fail_port_attach;
+
+ rc = sfc_set_rss_defaults(sa);
+ if (rc != 0)
+ goto fail_set_rss_defaults;
+
+ rc = sfc_filter_attach(sa);
+ if (rc != 0)
+ goto fail_filter_attach;
+
+ sfc_log_init(sa, "fini nic");
+ efx_nic_fini(enp);
+
+ sfc_flow_init(sa);
+
+ sa->state = SFC_ADAPTER_INITIALIZED;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_filter_attach:
+fail_set_rss_defaults:
+ sfc_port_detach(sa);
+
+fail_port_attach:
+ sfc_ev_detach(sa);
+
+fail_ev_attach:
+ sfc_intr_detach(sa);
+
+fail_intr_attach:
+ efx_nic_fini(sa->nic);
+
+fail_estimate_rsrc_limits:
+fail_nic_reset:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_flow_fini(sa);
+
+ sfc_filter_detach(sa);
+ sfc_port_detach(sa);
+ sfc_ev_detach(sa);
+ sfc_intr_detach(sa);
+
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
+
+int
+sfc_probe(struct sfc_adapter *sa)
+{
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ efx_nic_t *enp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sa->socket_id = rte_socket_id();
+
+ sfc_log_init(sa, "init mem bar");
+ rc = sfc_mem_bar_init(sa);
+ if (rc != 0)
+ goto fail_mem_bar_init;
+
+ sfc_log_init(sa, "get family");
+ rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
+ &sa->family);
+ if (rc != 0)
+ goto fail_family;
+ sfc_log_init(sa, "family is %u", sa->family);
+
+ sfc_log_init(sa, "create nic");
+ rte_spinlock_init(&sa->nic_lock);
+ rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
+ &sa->mem_bar, &sa->nic_lock, &enp);
+ if (rc != 0)
+ goto fail_nic_create;
+ sa->nic = enp;
+
+ rc = sfc_mcdi_init(sa);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ sfc_log_init(sa, "probe nic");
+ rc = efx_nic_probe(enp);
+ if (rc != 0)
+ goto fail_nic_probe;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_nic_probe:
+ sfc_mcdi_fini(sa);
+
+fail_mcdi_init:
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+fail_nic_create:
+fail_family:
+ sfc_mem_bar_fini(sa);
+
+fail_mem_bar_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_unprobe(struct sfc_adapter *sa)
+{
+ efx_nic_t *enp = sa->nic;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ sfc_log_init(sa, "unprobe nic");
+ efx_nic_unprobe(enp);
+
+ sfc_mcdi_fini(sa);
+
+ sfc_log_init(sa, "destroy nic");
+ sa->nic = NULL;
+ efx_nic_destroy(enp);
+
+ sfc_mem_bar_fini(sa);
+
+ sfc_flow_fini(sa);
+ sa->state = SFC_ADAPTER_UNINITIALIZED;
+}
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
new file mode 100644
index 00000000..fad0ce04
--- /dev/null
+++ b/drivers/net/sfc/sfc.h
@@ -0,0 +1,322 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_H
+#define _SFC_H
+
+#include <stdbool.h>
+
+#include <rte_ethdev.h>
+#include <rte_kvargs.h>
+#include <rte_spinlock.h>
+
+#include "efx.h"
+
+#include "sfc_filter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
+#if EFSYS_OPT_RX_SCALE
+/** RSS key length (bytes) */
+#define SFC_RSS_KEY_SIZE 40
+/** RSS hash offloads mask */
+#define SFC_RSS_OFFLOADS (ETH_RSS_IP | ETH_RSS_TCP)
+#endif
+
+/*
+ * +---------------+
+ * | UNINITIALIZED |<-----------+
+ * +---------------+ |
+ * |.eth_dev_init |.eth_dev_uninit
+ * V |
+ * +---------------+------------+
+ * | INITIALIZED |
+ * +---------------+<-----------<---------------+
+ * |.dev_configure | |
+ * V |failed |
+ * +---------------+------------+ |
+ * | CONFIGURING | |
+ * +---------------+----+ |
+ * |success | |
+ * | | +---------------+
+ * | | | CLOSING |
+ * | | +---------------+
+ * | | ^
+ * V |.dev_configure |
+ * +---------------+----+ |.dev_close
+ * | CONFIGURED |----------------------------+
+ * +---------------+<-----------+
+ * |.dev_start |
+ * V |
+ * +---------------+ |
+ * | STARTING |------------^
+ * +---------------+ failed |
+ * |success |
+ * | +---------------+
+ * | | STOPPING |
+ * | +---------------+
+ * | ^
+ * V |.dev_stop
+ * +---------------+------------+
+ * | STARTED |
+ * +---------------+
+ */
+enum sfc_adapter_state {
+ SFC_ADAPTER_UNINITIALIZED = 0,
+ SFC_ADAPTER_INITIALIZED,
+ SFC_ADAPTER_CONFIGURING,
+ SFC_ADAPTER_CONFIGURED,
+ SFC_ADAPTER_CLOSING,
+ SFC_ADAPTER_STARTING,
+ SFC_ADAPTER_STARTED,
+ SFC_ADAPTER_STOPPING,
+
+ SFC_ADAPTER_NSTATES
+};
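+
+/*
+ * A minimal control-flow sketch (assumed typical order, error handling
+ * omitted; not part of the driver) of how the entry points declared
+ * later in this header walk the state diagram above.
+ */
+#if 0
+static void
+sfc_adapter_lifecycle_sketch(struct sfc_adapter *sa)
+{
+ sfc_adapter_lock(sa);
+ sfc_probe(sa); /* BAR mapped, NIC probed */
+ sfc_attach(sa); /* -> INITIALIZED */
+ sfc_configure(sa); /* CONFIGURING -> CONFIGURED */
+ sfc_start(sa); /* STARTING -> STARTED */
+ sfc_stop(sa); /* STOPPING -> CONFIGURED */
+ sfc_close(sa); /* CLOSING -> INITIALIZED */
+ sfc_detach(sa); /* -> UNINITIALIZED */
+ sfc_unprobe(sa);
+ sfc_adapter_unlock(sa);
+}
+#endif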
+
+enum sfc_dev_filter_mode {
+ SFC_DEV_FILTER_MODE_PROMISC = 0,
+ SFC_DEV_FILTER_MODE_ALLMULTI,
+
+ SFC_DEV_FILTER_NMODES
+};
+
+enum sfc_mcdi_state {
+ SFC_MCDI_UNINITIALIZED = 0,
+ SFC_MCDI_INITIALIZED,
+ SFC_MCDI_BUSY,
+ SFC_MCDI_COMPLETED,
+
+ SFC_MCDI_NSTATES
+};
+
+struct sfc_mcdi {
+ rte_spinlock_t lock;
+ efsys_mem_t mem;
+ enum sfc_mcdi_state state;
+ efx_mcdi_transport_t transport;
+ bool logging;
+ uint32_t proxy_handle;
+ efx_rc_t proxy_result;
+};
+
+struct sfc_intr {
+ efx_intr_type_t type;
+ rte_intr_callback_fn handler;
+ boolean_t lsc_intr;
+};
+
+struct sfc_rxq_info;
+struct sfc_txq_info;
+struct sfc_dp_rx;
+
+struct sfc_port {
+ unsigned int lsc_seq;
+
+ uint32_t phy_adv_cap_mask;
+ uint32_t phy_adv_cap;
+
+ unsigned int flow_ctrl;
+ boolean_t flow_ctrl_autoneg;
+ size_t pdu;
+
+ boolean_t promisc;
+ boolean_t allmulti;
+
+ unsigned int max_mcast_addrs;
+ unsigned int nb_mcast_addrs;
+ uint8_t *mcast_addrs;
+
+ rte_spinlock_t mac_stats_lock;
+ uint64_t *mac_stats_buf;
+ efsys_mem_t mac_stats_dma_mem;
+ boolean_t mac_stats_reset_pending;
+ uint16_t mac_stats_update_period_ms;
+ uint32_t mac_stats_update_generation;
+ boolean_t mac_stats_periodic_dma_supported;
+ uint64_t mac_stats_last_request_timestamp;
+
+ uint32_t mac_stats_mask[EFX_MAC_STATS_MASK_NPAGES];
+};
+
+/* Adapter private data */
+struct sfc_adapter {
+ /*
+ * PMD setup and configuration are not thread safe. Since they are not
+ * performance sensitive, it is better to guarantee thread safety
+ * and add a device-level lock. Adapter control operations which
+ * change its state should acquire the lock.
+ */
+ rte_spinlock_t lock;
+ enum sfc_adapter_state state;
+ struct rte_eth_dev *eth_dev;
+ struct rte_kvargs *kvargs;
+ bool debug_init;
+ int socket_id;
+ efsys_bar_t mem_bar;
+ efx_family_t family;
+ efx_nic_t *nic;
+ rte_spinlock_t nic_lock;
+
+ struct sfc_mcdi mcdi;
+ struct sfc_intr intr;
+ struct sfc_port port;
+ struct sfc_filter filter;
+
+ unsigned int rxq_max;
+ unsigned int txq_max;
+
+ unsigned int txq_max_entries;
+
+ uint32_t evq_flags;
+ unsigned int evq_count;
+
+ unsigned int mgmt_evq_index;
+ rte_spinlock_t mgmt_evq_lock;
+ struct sfc_evq *mgmt_evq;
+
+ unsigned int rxq_count;
+ struct sfc_rxq_info *rxq_info;
+
+ unsigned int txq_count;
+ struct sfc_txq_info *txq_info;
+
+ boolean_t tso;
+
+ unsigned int rss_channels;
+
+#if EFSYS_OPT_RX_SCALE
+ efx_rx_scale_support_t rss_support;
+ efx_rx_hash_support_t hash_support;
+ efx_rx_hash_type_t rss_hash_types;
+ unsigned int rss_tbl[EFX_RSS_TBL_SIZE];
+ uint8_t rss_key[SFC_RSS_KEY_SIZE];
+#endif
+
+ const struct sfc_dp_rx *dp_rx;
+ const struct sfc_dp_tx *dp_tx;
+};
+
+/*
+ * Wrapper functions to acquire/release the lock, so that the locking
+ * scheme can be removed or changed in one place.
+ */
+
+static inline void
+sfc_adapter_lock_init(struct sfc_adapter *sa)
+{
+ rte_spinlock_init(&sa->lock);
+}
+
+static inline int
+sfc_adapter_is_locked(struct sfc_adapter *sa)
+{
+ return rte_spinlock_is_locked(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock(struct sfc_adapter *sa)
+{
+ rte_spinlock_lock(&sa->lock);
+}
+
+static inline int
+sfc_adapter_trylock(struct sfc_adapter *sa)
+{
+ return rte_spinlock_trylock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_unlock(struct sfc_adapter *sa)
+{
+ rte_spinlock_unlock(&sa->lock);
+}
+
+static inline void
+sfc_adapter_lock_fini(__rte_unused struct sfc_adapter *sa)
+{
+ /* Just for symmetry of the API */
+}
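+
+/*
+ * Typical usage of the wrappers above (illustrative only): control
+ * operations take the lock around any adapter state change, e.g.
+ *
+ *	sfc_adapter_lock(sa);
+ *	rc = sfc_start(sa);
+ *	sfc_adapter_unlock(sa);
+ */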
+
+/** Get the number of milliseconds since boot from the default timer */
+static inline uint64_t
+sfc_get_system_msecs(void)
+{
+ return rte_get_timer_cycles() * MS_PER_S / rte_get_timer_hz();
+}
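+
+/*
+ * For example (illustrative only): with a 2500000000 Hz timer and
+ * 5000000000 elapsed cycles the result is
+ * 5000000000 * 1000 / 2500000000 = 2000 ms.
+ */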
+
+int sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
+ size_t len, int socket_id, efsys_mem_t *esmp);
+void sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp);
+
+int sfc_probe(struct sfc_adapter *sa);
+void sfc_unprobe(struct sfc_adapter *sa);
+int sfc_attach(struct sfc_adapter *sa);
+void sfc_detach(struct sfc_adapter *sa);
+int sfc_start(struct sfc_adapter *sa);
+void sfc_stop(struct sfc_adapter *sa);
+
+int sfc_mcdi_init(struct sfc_adapter *sa);
+void sfc_mcdi_fini(struct sfc_adapter *sa);
+
+int sfc_configure(struct sfc_adapter *sa);
+void sfc_close(struct sfc_adapter *sa);
+
+int sfc_intr_attach(struct sfc_adapter *sa);
+void sfc_intr_detach(struct sfc_adapter *sa);
+int sfc_intr_configure(struct sfc_adapter *sa);
+void sfc_intr_close(struct sfc_adapter *sa);
+int sfc_intr_start(struct sfc_adapter *sa);
+void sfc_intr_stop(struct sfc_adapter *sa);
+
+int sfc_port_attach(struct sfc_adapter *sa);
+void sfc_port_detach(struct sfc_adapter *sa);
+int sfc_port_configure(struct sfc_adapter *sa);
+void sfc_port_close(struct sfc_adapter *sa);
+int sfc_port_start(struct sfc_adapter *sa);
+void sfc_port_stop(struct sfc_adapter *sa);
+void sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info);
+int sfc_port_update_mac_stats(struct sfc_adapter *sa);
+int sfc_port_reset_mac_stats(struct sfc_adapter *sa);
+int sfc_set_rx_mode(struct sfc_adapter *sa);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_H */
diff --git a/drivers/net/sfc/sfc_debug.h b/drivers/net/sfc/sfc_debug.h
new file mode 100644
index 00000000..c0b48677
--- /dev/null
+++ b/drivers/net/sfc/sfc_debug.h
@@ -0,0 +1,59 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DEBUG_H_
+#define _SFC_DEBUG_H_
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_SFC_EFX_DEBUG
+/* Avoid a dependency on RTE_LOG_LEVEL so that debug checks can be
+ * enabled in the driver only.
+ */
+#define SFC_ASSERT(exp) RTE_VERIFY(exp)
+#else
+/* If the driver debug is not enabled, follow DPDK debug/non-debug */
+#define SFC_ASSERT(exp) RTE_ASSERT(exp)
+#endif
+
+/* Panic with a PMD-prefixed message; the prefix and \n are added automatically */
+#define sfc_panic(sa, fmt, args...) \
+ do { \
+ const struct rte_eth_dev *_dev = (sa)->eth_dev; \
+ const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ \
+ rte_panic("sfc " PCI_PRI_FMT " #%" PRIu8 ": " fmt "\n", \
+ _pci_dev->addr.domain, _pci_dev->addr.bus, \
+ _pci_dev->addr.devid, _pci_dev->addr.function,\
+ _dev->data->port_id, ##args); \
+ } while (0)
+
+#endif /* _SFC_DEBUG_H_ */
diff --git a/drivers/net/sfc/sfc_dp.c b/drivers/net/sfc/sfc_dp.c
new file mode 100644
index 00000000..860aa921
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp.c
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_log.h>
+
+#include "sfc_dp.h"
+
+void
+sfc_dp_queue_init(struct sfc_dp_queue *dpq, uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr)
+{
+ dpq->port_id = port_id;
+ dpq->queue_id = queue_id;
+ dpq->pci_addr = *pci_addr;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_name(struct sfc_dp_list *head, enum sfc_dp_type type,
+ const char *name)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ if (strcmp(entry->name, name) == 0)
+ return entry;
+ }
+
+ return NULL;
+}
+
+struct sfc_dp *
+sfc_dp_find_by_caps(struct sfc_dp_list *head, enum sfc_dp_type type,
+ unsigned int avail_caps)
+{
+ struct sfc_dp *entry;
+
+ TAILQ_FOREACH(entry, head, links) {
+ if (entry->type != type)
+ continue;
+
+ /* Take the first matching */
+ if (sfc_dp_match_hw_fw_caps(entry, avail_caps))
+ return entry;
+ }
+
+ return NULL;
+}
+
+int
+sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry)
+{
+ if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
+ rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD,
+ "sfc %s dapapath '%s' already registered\n",
+ entry->type == SFC_DP_RX ? "Rx" :
+ entry->type == SFC_DP_TX ? "Tx" :
+ "unknown",
+ entry->name);
+ return EEXIST;
+ }
+
+ TAILQ_INSERT_TAIL(head, entry, links);
+
+ return 0;
+}
diff --git a/drivers/net/sfc/sfc_dp.h b/drivers/net/sfc/sfc_dp.h
new file mode 100644
index 00000000..eff0aa87
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp.h
@@ -0,0 +1,125 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_H
+#define _SFC_DP_H
+
+#include <stdbool.h>
+#include <sys/queue.h>
+
+#include <rte_pci.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_DIV_ROUND_UP(a, b) \
+ __extension__ ({ \
+ typeof(a) _a = (a); \
+ typeof(b) _b = (b); \
+ \
+ (_a + (_b - 1)) / _b; \
+ })
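+
+/*
+ * For example (illustrative only): SFC_DIV_ROUND_UP(10, 4) evaluates to
+ * (10 + 3) / 4 == 3, i.e. the quotient rounded up.
+ */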
+
+/**
+ * Datapath exception handler to be provided by the control path.
+ */
+typedef void (sfc_dp_exception_t)(void *ctrl);
+
+enum sfc_dp_type {
+ SFC_DP_RX = 0, /**< Receive datapath */
+ SFC_DP_TX, /**< Transmit datapath */
+};
+
+
+/** Datapath queue run-time information */
+struct sfc_dp_queue {
+ uint16_t port_id;
+ uint16_t queue_id;
+ struct rte_pci_addr pci_addr;
+};
+
+void sfc_dp_queue_init(struct sfc_dp_queue *dpq,
+ uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr);
+
+/*
+ * Helper macro to define datapath logging macros and have uniform
+ * logging.
+ */
+#define SFC_DP_LOG(dp_name, level, dpq, ...) \
+ do { \
+ const struct sfc_dp_queue *_dpq = (dpq); \
+ const struct rte_pci_addr *_addr = &(_dpq)->pci_addr; \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("%s " PCI_PRI_FMT \
+ " #%" PRIu16 ".%" PRIu16 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ dp_name, \
+ _addr->domain, _addr->bus, \
+ _addr->devid, _addr->function, \
+ _dpq->port_id, _dpq->queue_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+
+/** Datapath definition */
+struct sfc_dp {
+ TAILQ_ENTRY(sfc_dp) links;
+ const char *name;
+ enum sfc_dp_type type;
+ /* Mask of required hardware/firmware capabilities */
+ unsigned int hw_fw_caps;
+#define SFC_DP_HW_FW_CAP_EF10 0x1
+};
+
+/** List of datapath variants */
+TAILQ_HEAD(sfc_dp_list, sfc_dp);
+
+/* Check if available HW/FW capabilities are sufficient for the datapath */
+static inline bool
+sfc_dp_match_hw_fw_caps(const struct sfc_dp *dp, unsigned int avail_caps)
+{
+ return (dp->hw_fw_caps & avail_caps) == dp->hw_fw_caps;
+}
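+
+/*
+ * For example (illustrative only): a datapath with
+ * hw_fw_caps == SFC_DP_HW_FW_CAP_EF10 matches only when avail_caps has
+ * that bit set, whereas a datapath with hw_fw_caps == 0 matches any
+ * avail_caps.
+ */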
+
+struct sfc_dp *sfc_dp_find_by_name(struct sfc_dp_list *head,
+ enum sfc_dp_type type, const char *name);
+struct sfc_dp *sfc_dp_find_by_caps(struct sfc_dp_list *head,
+ enum sfc_dp_type type,
+ unsigned int avail_caps);
+int sfc_dp_register(struct sfc_dp_list *head, struct sfc_dp *entry);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_H */
diff --git a/drivers/net/sfc/sfc_dp_rx.h b/drivers/net/sfc/sfc_dp_rx.h
new file mode 100644
index 00000000..9d05a4b3
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp_rx.h
@@ -0,0 +1,197 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_RX_H
+#define _SFC_DP_RX_H
+
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic receive queue information used on data path.
+ * It must be kept as small as possible since it is embedded into
+ * the structure used on the datapath.
+ */
+struct sfc_dp_rxq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath receive queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be passed as plain function arguments, but that
+ * would be hard to read.
+ */
+struct sfc_dp_rx_qcreate_info {
+ /** Memory pool to allocate Rx buffer from */
+ struct rte_mempool *refill_mb_pool;
+ /** Minimum number of unused Rx descriptors to do refill */
+ unsigned int refill_threshold;
+ /**
+ * Usable mbuf data space in accordance with alignment and
+ * padding requirements imposed by HW.
+ */
+ unsigned int buf_size;
+
+ /**
+ * Maximum number of Rx descriptors completed in one Rx event.
+ * Just for sanity checks if datapath would like to do.
+ */
+ unsigned int batch_max;
+
+ /** Pseudo-header size */
+ unsigned int prefix_size;
+
+ /** Receive queue flags initializer */
+ unsigned int flags;
+#define SFC_RXQ_FLAG_RSS_HASH 0x1
+
+ /** Rx queue size */
+ unsigned int rxq_entries;
+ /** DMA-mapped Rx descriptors ring */
+ void *rxq_hw_ring;
+
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /**
+ * Virtual address of the memory-mapped BAR to push Rx refill
+ * doorbell
+ */
+ volatile void *mem_bar;
+};
+
+/**
+ * Allocate and initialize datapath receive queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Receive queue information
+ * @param dp_rxqp Location for generic datapath receive queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_rx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp);
+
+/**
+ * Free resources allocated for the datapath receive queue.
+ */
+typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue start callback.
+ *
+ * It hands over the EvQ to the datapath.
+ */
+typedef int (sfc_dp_rx_qstart_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int evq_read_ptr);
+
+/**
+ * Receive queue stop function called before flush.
+ */
+typedef void (sfc_dp_rx_qstop_t)(struct sfc_dp_rxq *dp_rxq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Receive event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_rx_qrx_ev_t)(struct sfc_dp_rxq *dp_rxq, unsigned int id);
+
+/**
+ * Receive queue purge function called after queue flush.
+ *
+ * Should be used to free unused receive buffers.
+ */
+typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Get packet types recognized/classified */
+typedef const uint32_t * (sfc_dp_rx_supported_ptypes_get_t)(void);
+
+/** Get number of pending Rx descriptors */
+typedef unsigned int (sfc_dp_rx_qdesc_npending_t)(struct sfc_dp_rxq *dp_rxq);
+
+/** Receive datapath definition */
+struct sfc_dp_rx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_RX_FEAT_SCATTER 0x1
+ sfc_dp_rx_qcreate_t *qcreate;
+ sfc_dp_rx_qdestroy_t *qdestroy;
+ sfc_dp_rx_qstart_t *qstart;
+ sfc_dp_rx_qstop_t *qstop;
+ sfc_dp_rx_qrx_ev_t *qrx_ev;
+ sfc_dp_rx_qpurge_t *qpurge;
+ sfc_dp_rx_supported_ptypes_get_t *supported_ptypes_get;
+ sfc_dp_rx_qdesc_npending_t *qdesc_npending;
+ eth_rx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_RX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+static inline struct sfc_dp_rx *
+sfc_dp_find_rx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_RX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_rx, dp);
+}
+
+extern struct sfc_dp_rx sfc_efx_rx;
+extern struct sfc_dp_rx sfc_ef10_rx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_RX_H */
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
new file mode 100644
index 00000000..2bb9a2e7
--- /dev/null
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -0,0 +1,170 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_DP_TX_H
+#define _SFC_DP_TX_H
+
+#include <rte_ethdev.h>
+
+#include "sfc_dp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Generic transmit queue information used on data path.
+ * It must be kept as small as possible since it is embedded into
+ * the structure used on the datapath.
+ */
+struct sfc_dp_txq {
+ struct sfc_dp_queue dpq;
+};
+
+/**
+ * Datapath transmit queue creation information.
+ *
+ * The structure is used just to pass information from control path to
+ * datapath. It could be passed as plain function arguments, but that
+ * would be hard to read.
+ */
+struct sfc_dp_tx_qcreate_info {
+ /** Minimum number of unused Tx descriptors to do reap */
+ unsigned int free_thresh;
+ /** Transmit queue configuration flags */
+ unsigned int flags;
+ /** Tx queue size */
+ unsigned int txq_entries;
+ /** Maximum size of data in the DMA descriptor */
+ uint16_t dma_desc_size_max;
+ /** DMA-mapped Tx descriptors ring */
+ void *txq_hw_ring;
+ /** Associated event queue size */
+ unsigned int evq_entries;
+ /** Hardware event ring */
+ void *evq_hw_ring;
+ /** The queue index in hardware (required to push right doorbell) */
+ unsigned int hw_index;
+ /** Virtual address of the memory-mapped BAR to push Tx doorbell */
+ volatile void *mem_bar;
+};
+
+/**
+ * Allocate and initialize datapath transmit queue.
+ *
+ * @param port_id The port identifier
+ * @param queue_id The queue identifier
+ * @param pci_addr PCI function address
+ * @param socket_id Socket identifier to allocate memory
+ * @param info Tx queue details wrapped in structure
+ * @param dp_txqp Location for generic datapath transmit queue pointer
+ *
+ * @return 0 or positive errno.
+ */
+typedef int (sfc_dp_tx_qcreate_t)(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp);
+
+/**
+ * Free resources allocated for datapath transmit queue.
+ */
+typedef void (sfc_dp_tx_qdestroy_t)(struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue start callback.
+ *
+ * It hands over the EvQ to the datapath.
+ */
+typedef int (sfc_dp_tx_qstart_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int evq_read_ptr,
+ unsigned int txq_desc_index);
+
+/**
+ * Transmit queue stop function called before the queue flush.
+ *
+ * It returns EvQ to the control path.
+ */
+typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
+ unsigned int *evq_read_ptr);
+
+/**
+ * Transmit event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
+
+/**
+ * Transmit queue function called after the queue flush.
+ */
+typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
+
+/** Transmit datapath definition */
+struct sfc_dp_tx {
+ struct sfc_dp dp;
+
+ unsigned int features;
+#define SFC_DP_TX_FEAT_VLAN_INSERT 0x1
+#define SFC_DP_TX_FEAT_TSO 0x2
+#define SFC_DP_TX_FEAT_MULTI_SEG 0x4
+ sfc_dp_tx_qcreate_t *qcreate;
+ sfc_dp_tx_qdestroy_t *qdestroy;
+ sfc_dp_tx_qstart_t *qstart;
+ sfc_dp_tx_qstop_t *qstop;
+ sfc_dp_tx_qtx_ev_t *qtx_ev;
+ sfc_dp_tx_qreap_t *qreap;
+ eth_tx_burst_t pkt_burst;
+};
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_name(struct sfc_dp_list *head, const char *name)
+{
+ struct sfc_dp *p = sfc_dp_find_by_name(head, SFC_DP_TX, name);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+static inline struct sfc_dp_tx *
+sfc_dp_find_tx_by_caps(struct sfc_dp_list *head, unsigned int avail_caps)
+{
+ struct sfc_dp *p = sfc_dp_find_by_caps(head, SFC_DP_TX, avail_caps);
+
+ return (p == NULL) ? NULL : container_of(p, struct sfc_dp_tx, dp);
+}
+
+extern struct sfc_dp_tx sfc_efx_tx;
+extern struct sfc_dp_tx sfc_ef10_tx;
+extern struct sfc_dp_tx sfc_ef10_simple_tx;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_DP_TX_H */
diff --git a/drivers/net/sfc/sfc_ef10.h b/drivers/net/sfc/sfc_ef10.h
new file mode 100644
index 00000000..060d8fef
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_EF10_H
+#define _SFC_EF10_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of events in one cache line */
+#define SFC_EF10_EV_PER_CACHE_LINE \
+ (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+#define SFC_EF10_EV_QCLEAR_MASK (~(SFC_EF10_EV_PER_CACHE_LINE - 1))
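+
+/*
+ * For example (illustrative only): with a 64-byte cache line and 8-byte
+ * efx_qword_t events, SFC_EF10_EV_PER_CACHE_LINE is 8 and the mask
+ * rounds event queue read pointers down to an 8-event (one cache line)
+ * boundary.
+ */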
+
+#if defined(SFC_EF10_EV_QCLEAR_USE_EFX)
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ efx_qword_t *entry = ptr;
+ unsigned int i;
+
+ for (i = 0; i < SFC_EF10_EV_PER_CACHE_LINE; ++i)
+ EFX_SET_QWORD(entry[i]);
+}
+#else
+/*
+ * It is possible to do this using AVX2 and AVX512F, but it yields
+ * lower performance.
+ */
+static inline void
+sfc_ef10_ev_qclear_cache_line(void *ptr)
+{
+ const __m128i val = _mm_set1_epi64x(UINT64_MAX);
+ __m128i *addr = ptr;
+ unsigned int i;
+
+ RTE_BUILD_BUG_ON(sizeof(val) > RTE_CACHE_LINE_SIZE);
+ RTE_BUILD_BUG_ON(RTE_CACHE_LINE_SIZE % sizeof(val) != 0);
+
+ for (i = 0; i < RTE_CACHE_LINE_SIZE / sizeof(val); ++i)
+ _mm_store_si128(&addr[i], val);
+}
+#endif
+
+static inline void
+sfc_ef10_ev_qclear(efx_qword_t *hw_ring, unsigned int ptr_mask,
+ unsigned int old_read_ptr, unsigned int read_ptr)
+{
+ const unsigned int clear_ptr = read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+ unsigned int old_clear_ptr = old_read_ptr & SFC_EF10_EV_QCLEAR_MASK;
+
+ while (old_clear_ptr != clear_ptr) {
+ sfc_ef10_ev_qclear_cache_line(
+ &hw_ring[old_clear_ptr & ptr_mask]);
+ old_clear_ptr += SFC_EF10_EV_PER_CACHE_LINE;
+ }
+
+ /*
+ * No barriers here.
+ * Functions which push the doorbell must take care of the correct
+ * ordering: the store instructions which fill in the EvQ ring must be
+ * retired from the CPU and synced to DMA before the doorbell write
+ * which makes these event entries available for use.
+ */
+}
+
+static inline bool
+sfc_ef10_ev_present(const efx_qword_t ev)
+{
+ return ~EFX_QWORD_FIELD(ev, EFX_DWORD_0) |
+ ~EFX_QWORD_FIELD(ev, EFX_DWORD_1);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EF10_H */
diff --git a/drivers/net/sfc/sfc_ef10_rx.c b/drivers/net/sfc/sfc_ef10_rx.c
new file mode 100644
index 00000000..1484baba
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_rx.c
@@ -0,0 +1,712 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* EF10 native datapath implementation */
+
+#include <stdbool.h>
+
+#include <rte_byteorder.h>
+#include <rte_mbuf_ptype.h>
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_tweak.h"
+#include "sfc_dp_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_rx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/**
+ * Alignment requirement for value written to RX WPTR:
+ * the WPTR must be aligned to an 8 descriptor boundary.
+ */
+#define SFC_EF10_RX_WPTR_ALIGN 8
+
+/**
+ * Maximum number of descriptors/buffers in the Rx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ * The EF10 native datapath uses an event queue of the same size as the Rx queue.
+ * The maximum number of events on the datapath can be estimated as the number of
+ * Rx queue entries (one event per Rx buffer in the worst case) plus
+ * Rx error and flush events.
+ */
+#define SFC_EF10_RXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Rx error */ - 1 /* flush */)
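+
+/*
+ * A worked example (illustrative only), assuming a 64-byte cache line
+ * (SFC_EF10_EV_PER_CACHE_LINE == 8): for a 512-entry ring,
+ * SFC_EF10_RXQ_LIMIT(512) = 512 - 1 - 7 - 1 - 1 = 502 descriptors may be
+ * posted at most.
+ */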
+
+struct sfc_ef10_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_rxq {
+ /* Used on data path */
+ unsigned int flags;
+#define SFC_EF10_RXQ_STARTED 0x1
+#define SFC_EF10_RXQ_NOT_RUNNING 0x2
+#define SFC_EF10_RXQ_EXCEPTION 0x4
+#define SFC_EF10_RXQ_RSS_HASH 0x8
+ unsigned int ptr_mask;
+ unsigned int prepared;
+ unsigned int completed;
+ unsigned int evq_read_ptr;
+ efx_qword_t *evq_hw_ring;
+ struct sfc_ef10_rx_sw_desc *sw_ring;
+ uint64_t rearm_data;
+ uint16_t prefix_size;
+
+ /* Used on refill */
+ uint16_t buf_size;
+ unsigned int added;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ efx_qword_t *rxq_hw_ring;
+ volatile void *doorbell;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_ef10_rxq *
+sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
+}
+
+static void
+sfc_ef10_rx_qpush(struct sfc_ef10_rxq *rxq)
+{
+ efx_dword_t dword;
+
+ /* Hardware has alignment restriction for WPTR */
+ RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
+ SFC_ASSERT(RTE_ALIGN(rxq->added, SFC_EF10_RX_WPTR_ALIGN) == rxq->added);
+
+ EFX_POPULATE_DWORD_1(dword, ERF_DZ_RX_DESC_WPTR,
+ rxq->added & rxq->ptr_mask);
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_write32() has rte_io_wmb() which guarantees that the STORE
+ * operations (i.e. Rx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_write32(dword.ed_u32[0], rxq->doorbell);
+}
+
+static void
+sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ const uint32_t buf_size = rxq->buf_size;
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ unsigned int added = rxq->added;
+
+ free_space = SFC_EF10_RXQ_LIMIT(ptr_mask + 1) -
+ (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ do {
+ unsigned int id;
+ unsigned int i;
+
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ struct rte_eth_dev_data *dev_data =
+ rte_eth_devices[rxq->dp.dpq.port_id].data;
+
+ /*
+ * Incrementing the counter from different contexts is hardly
+ * safe, but all PMDs do it this way.
+ */
+ dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0, id = added & ptr_mask;
+ i < RTE_DIM(objs);
+ ++i, ++id) {
+ struct rte_mbuf *m = objs[i];
+ struct sfc_ef10_rx_sw_desc *rxd;
+ phys_addr_t phys_addr;
+
+ SFC_ASSERT((id & ~ptr_mask) == 0);
+ rxd = &rxq->sw_ring[id];
+ rxd->mbuf = m;
+
+ /*
+ * Avoid writing to mbuf. It is cheaper to do it
+ * when we receive packet and fill in nearby
+ * structure members.
+ */
+
+ phys_addr = rte_mbuf_data_dma_addr_default(m);
+ EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
+ ESF_DZ_RX_KER_BYTE_CNT, buf_size,
+ ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
+ }
+
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(rxq->added != added);
+ rxq->added = added;
+ sfc_ef10_rx_qpush(rxq);
+}
+
+static void
+sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
+{
+ struct rte_mbuf *next_mbuf;
+
+ /* Prefetch next bunch of software descriptors */
+ if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
+ rte_prefetch0(&rxq->sw_ring[next_id]);
+
+ /*
+ * It looks strange to prefetch depending on previous prefetch
+ * data, but measurements show that it is really efficient and
+ * increases packet rate.
+ */
+ next_mbuf = rxq->sw_ring[next_id].mbuf;
+ if (likely(next_mbuf != NULL)) {
+ /* Prefetch the next mbuf structure */
+ rte_mbuf_prefetch_part1(next_mbuf);
+
+ /* Prefetch pseudo header of the next packet */
+ /* data_off is not filled in yet */
+ /* The data may not be ready yet, but prefetching it is harmless */
+ rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM);
+ }
+}
+
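+/*
+ * Hand out mbufs whose Rx events have already been processed but which did
+ * not fit into the previous burst (tracked by rxq->prepared). They are
+ * delivered first on the next burst before any new events are polled.
+ */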
+static uint16_t
+sfc_ef10_rx_prepared(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->prepared);
+ unsigned int completed = rxq->completed;
+ unsigned int i;
+
+ rxq->prepared -= n_rx_pkts;
+ rxq->completed = completed + n_rx_pkts;
+
+ for (i = 0; i < n_rx_pkts; ++i, ++completed)
+ rx_pkts[i] = rxq->sw_ring[completed & rxq->ptr_mask].mbuf;
+
+ return n_rx_pkts;
+}
+
+static void
+sfc_ef10_rx_ev_to_offloads(struct sfc_ef10_rxq *rxq, const efx_qword_t rx_ev,
+ struct rte_mbuf *m)
+{
+ uint32_t l2_ptype = 0;
+ uint32_t l3_ptype = 0;
+ uint32_t l4_ptype = 0;
+ uint64_t ol_flags = 0;
+
+ if (unlikely(EFX_TEST_QWORD_BIT(rx_ev, ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))
+ goto done;
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_ETH_TAG_CLASS)) {
+ case ESE_DZ_ETH_TAG_CLASS_NONE:
+ l2_ptype = RTE_PTYPE_L2_ETHER;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN1:
+ l2_ptype = RTE_PTYPE_L2_ETHER_VLAN;
+ break;
+ case ESE_DZ_ETH_TAG_CLASS_VLAN2:
+ l2_ptype = RTE_PTYPE_L2_ETHER_QINQ;
+ break;
+ default:
+ /* Unexpected Eth tag class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L3_CLASS)) {
+ case ESE_DZ_L3_CLASS_IP4_FRAG:
+ l4_ptype = RTE_PTYPE_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP4:
+ l3_ptype = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH |
+ ((EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_IPCKSUM_ERR_LBN)) ?
+ PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+ break;
+ case ESE_DZ_L3_CLASS_IP6_FRAG:
+ l4_ptype |= RTE_PTYPE_L4_FRAG;
+ /* FALLTHROUGH */
+ case ESE_DZ_L3_CLASS_IP6:
+ l3_ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+ ol_flags |= PKT_RX_RSS_HASH;
+ break;
+ case ESE_DZ_L3_CLASS_ARP:
+ /* Override Layer 2 packet type */
+ l2_ptype = RTE_PTYPE_L2_ETHER_ARP;
+ break;
+ default:
+ /* Unexpected Layer 3 class */
+ SFC_ASSERT(false);
+ }
+
+ switch (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_L4_CLASS)) {
+ case ESE_DZ_L4_CLASS_TCP:
+ l4_ptype = RTE_PTYPE_L4_TCP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_DZ_L4_CLASS_UDP:
+ l4_ptype = RTE_PTYPE_L4_UDP;
+ ol_flags |=
+ (EFX_TEST_QWORD_BIT(rx_ev,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN)) ?
+ PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case ESE_DZ_L4_CLASS_UNKNOWN:
+ break;
+ default:
+ /* Unexpected Layer 4 class */
+ SFC_ASSERT(false);
+ }
+
+ /* Remove RSS hash offload flag if RSS is not enabled */
+ if (~rxq->flags & SFC_EF10_RXQ_RSS_HASH)
+ ol_flags &= ~PKT_RX_RSS_HASH;
+
+done:
+ m->ol_flags = ol_flags;
+ m->packet_type = l2_ptype | l3_ptype | l4_ptype;
+}
+
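+/*
+ * The Rx prefix (pseudo header) placed by the NIC in front of the packet
+ * data carries the RSS hash in its first 4 bytes and the packet length at
+ * byte offset 8, both little-endian; the helpers below extract these fields.
+ */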
+static uint16_t
+sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
+}
+
+static uint32_t
+sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
+{
+ return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
+}
+
+static uint16_t
+sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
+ struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ const unsigned int ptr_mask = rxq->ptr_mask;
+ unsigned int completed = rxq->completed;
+ unsigned int ready;
+ struct sfc_ef10_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ struct rte_mbuf *m0;
+ uint16_t n_rx_pkts;
+ const uint8_t *pseudo_hdr;
+ uint16_t pkt_len;
+
+ ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - completed) &
+ EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
+ SFC_ASSERT(ready > 0);
+
+ if (rx_ev.eq_u64[0] &
+ rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
+ (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
+ SFC_ASSERT(rxq->prepared == 0);
+ rxq->completed += ready;
+ while (ready-- > 0) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ }
+ return 0;
+ }
+
+ n_rx_pkts = RTE_MIN(ready, nb_pkts);
+ rxq->prepared = ready - n_rx_pkts;
+ rxq->completed += n_rx_pkts;
+
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Classify packet based on Rx event */
+ sfc_ef10_rx_ev_to_offloads(rxq, rx_ev, m);
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get the RSS hash from the pseudo header to avoid
+ * a condition/branch. Whether it is valid depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ if (ready == 1)
+ pkt_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
+ rxq->prefix_size;
+ else
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+
+ /* Remember mbuf to copy offload flags and packet type from */
+ m0 = m;
+ for (--ready; ready > 0; --ready) {
+ rxd = &rxq->sw_ring[completed++ & ptr_mask];
+
+ sfc_ef10_rx_prefetch_next(rxq, completed & ptr_mask);
+
+ m = rxd->mbuf;
+
+ if (ready > rxq->prepared)
+ *rx_pkts++ = m;
+
+ RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
+ sizeof(rxq->rearm_data));
+ m->rearm_data[0] = rxq->rearm_data;
+
+ /* Event-dependent information is the same */
+ m->ol_flags = m0->ol_flags;
+ m->packet_type = m0->packet_type;
+
+ /* data_off already moved past pseudo header */
+ pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+ /*
+ * Always get the RSS hash from the pseudo header to avoid
+ * a condition/branch. Whether it is valid depends on
+ * PKT_RX_RSS_HASH in m->ol_flags.
+ */
+ m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
+
+ pkt_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
+ SFC_ASSERT(pkt_len > 0);
+ rte_pktmbuf_data_len(m) = pkt_len;
+ rte_pktmbuf_pkt_len(m) = pkt_len;
+
+ SFC_ASSERT(m->next == NULL);
+ }
+
+ return n_rx_pkts;
+}
+
+static bool
+sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
+{
+ *rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*rx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_RX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
+ sfc_ef10_rx_err(&rxq->dp.dpq,
+ "RxQ exception at EvQ read ptr %#x",
+ rxq->evq_read_ptr);
+ return false;
+ }
+
+ rxq->evq_read_ptr++;
+ return true;
+}
+
+static uint16_t
+sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
+ unsigned int evq_old_read_ptr;
+ uint16_t n_rx_pkts;
+ efx_qword_t rx_ev;
+
+ if (unlikely(rxq->flags &
+ (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
+ return 0;
+
+ n_rx_pkts = sfc_ef10_rx_prepared(rxq, rx_pkts, nb_pkts);
+
+ evq_old_read_ptr = rxq->evq_read_ptr;
+ while (n_rx_pkts != nb_pkts && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
+ /*
+ * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ n_rx_pkts += sfc_ef10_rx_process_event(rxq, rx_ev,
+ rx_pkts + n_rx_pkts,
+ nb_pkts - n_rx_pkts);
+ }
+
+ sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
+ rxq->evq_read_ptr);
+
+ /* It is not a problem if we refill in the case of exception */
+ sfc_ef10_rx_qrefill(rxq);
+
+ return n_rx_pkts;
+}
+
+static const uint32_t *
+sfc_ef10_supported_ptypes_get(void)
+{
+ static const uint32_t ef10_native_ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_ARP,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ef10_native_ptypes;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
+static unsigned int
+sfc_ef10_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
+{
+ /*
+ * A correct implementation requires EvQ polling and event
+ * processing (keeping all ready mbufs in prepared).
+ */
+ return -ENOTSUP;
+}
+
+
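+/*
+ * Build the 64-bit rearm_data template kept in the queue structure: storing
+ * it into an mbuf with a single 8-byte write (re)initializes the refcnt,
+ * data_off, nb_segs and port fields filled in below.
+ */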
+static uint64_t
+sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
+{
+ struct rte_mbuf m;
+
+ memset(&m, 0, sizeof(m));
+
+ rte_mbuf_refcnt_set(&m, 1);
+ m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
+ m.nb_segs = 1;
+ m.port = port_id;
+
+ /* rearm_data covers structure members filled in above */
+ rte_compiler_barrier();
+ RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
+ return m.rearm_data[0];
+}
+
+static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
+static int
+sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_ef10_rxq *rxq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->rxq_entries != info->evq_entries)
+ goto fail_rxq_args;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
+ info->rxq_entries,
+ sizeof(*rxq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_ring == NULL)
+ goto fail_desc_alloc;
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->evq_hw_ring = info->evq_hw_ring;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->rearm_data =
+ sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
+ rxq->prefix_size = info->prefix_size;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+ rxq->rxq_hw_ring = info->rxq_hw_ring;
+ rxq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_RX_DESC_UPD_REG_OFST +
+ info->hw_index * ER_DZ_RX_DESC_UPD_REG_STEP;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+fail_rxq_args:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
+static void
+sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
+static int
+sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->prepared = 0;
+ rxq->completed = rxq->added = 0;
+
+ sfc_ef10_rx_qrefill(rxq);
+
+ rxq->evq_read_ptr = evq_read_ptr;
+
+ rxq->flags |= SFC_EF10_RXQ_STARTED;
+ rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
+static void
+sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
+
+ *evq_read_ptr = rxq->evq_read_ptr;
+}
+
+static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
+static bool
+sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+
+ SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Rx event since we free all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
+static void
+sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_ef10_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_ring[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ }
+
+ rxq->flags &= ~SFC_EF10_RXQ_STARTED;
+}
+
+struct sfc_dp_rx sfc_ef10_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = 0,
+ .qcreate = sfc_ef10_rx_qcreate,
+ .qdestroy = sfc_ef10_rx_qdestroy,
+ .qstart = sfc_ef10_rx_qstart,
+ .qstop = sfc_ef10_rx_qstop,
+ .qrx_ev = sfc_ef10_rx_qrx_ev,
+ .qpurge = sfc_ef10_rx_qpurge,
+ .supported_ptypes_get = sfc_ef10_supported_ptypes_get,
+ .qdesc_npending = sfc_ef10_rx_qdesc_npending,
+ .pkt_burst = sfc_ef10_recv_pkts,
+};
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
new file mode 100644
index 00000000..bac9baa9
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -0,0 +1,560 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_dp_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+#include "sfc_ef10.h"
+
+#define sfc_ef10_tx_err(dpq, ...) \
+ SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
+
+/** Maximum length of the DMA descriptor data */
+#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
+ ((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
+
+/**
+ * Maximum number of descriptors/buffers in the Tx ring.
+ * It should guarantee that the corresponding event queue never overfills.
+ * The EF10 native datapath uses an event queue of the same size as the
+ * Tx queue. The maximum number of events on the datapath can be estimated
+ * as the number of Tx queue entries (one event per Tx buffer in the worst
+ * case) plus Tx error and flush events.
+ */
+#define SFC_EF10_TXQ_LIMIT(_ndesc) \
+ ((_ndesc) - 1 /* head must not step on tail */ - \
+ (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
+ 1 /* Tx error */ - 1 /* flush */)
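+/*
+ * For example, assuming 64-byte cache lines and 8-byte events
+ * (SFC_EF10_EV_PER_CACHE_LINE == 8), a 512-entry ring would give
+ * SFC_EF10_TXQ_LIMIT(512) == 512 - 1 - 7 - 1 - 1 == 502 usable descriptors.
+ */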
+
+struct sfc_ef10_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+};
+
+struct sfc_ef10_txq {
+ unsigned int flags;
+#define SFC_EF10_TXQ_STARTED 0x1
+#define SFC_EF10_TXQ_NOT_RUNNING 0x2
+#define SFC_EF10_TXQ_EXCEPTION 0x4
+
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int completed;
+ unsigned int free_thresh;
+ unsigned int evq_read_ptr;
+ struct sfc_ef10_tx_sw_desc *sw_ring;
+ efx_qword_t *txq_hw_ring;
+ volatile void *doorbell;
+ efx_qword_t *evq_hw_ring;
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_ef10_txq *
+sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_ef10_txq, dp);
+}
+
+static bool
+sfc_ef10_tx_get_event(struct sfc_ef10_txq *txq, efx_qword_t *tx_ev)
+{
+ volatile efx_qword_t *evq_hw_ring = txq->evq_hw_ring;
+
+ /*
+ * The exception flag is only set while reaping. Reaping is never
+ * done twice per packet burst get, and the absence of the flag is
+ * checked on burst get entry.
+ */
+ SFC_ASSERT((txq->flags & SFC_EF10_TXQ_EXCEPTION) == 0);
+
+ *tx_ev = evq_hw_ring[txq->evq_read_ptr & txq->ptr_mask];
+
+ if (!sfc_ef10_ev_present(*tx_ev))
+ return false;
+
+ if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
+ FSE_AZ_EV_CODE_TX_EV)) {
+ /*
+ * Do not move read_ptr to keep the event for exception
+ * handling by the control path.
+ */
+ txq->flags |= SFC_EF10_TXQ_EXCEPTION;
+ sfc_ef10_tx_err(&txq->dp.dpq,
+ "TxQ exception at EvQ read ptr %#x",
+ txq->evq_read_ptr);
+ return false;
+ }
+
+ txq->evq_read_ptr++;
+ return true;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+ const unsigned int old_read_ptr = txq->evq_read_ptr;
+ const unsigned int ptr_mask = txq->ptr_mask;
+ unsigned int completed = txq->completed;
+ unsigned int pending = completed;
+ const unsigned int curr_done = pending - 1;
+ unsigned int anew_done = curr_done;
+ efx_qword_t tx_ev;
+
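+ /*
+ * curr_done is the ring index of the last descriptor known to be
+ * completed; each Tx event carries the index of the latest completed
+ * descriptor, so the (masked) difference anew_done - curr_done after
+ * the loop is the number of descriptors completed since the last reap.
+ */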
+ while (sfc_ef10_tx_get_event(txq, &tx_ev)) {
+ /*
+ * DROP_EVENT is internal to the NIC; software should
+ * never see it and, therefore, may ignore it.
+ */
+
+ /* Update the latest done descriptor */
+ anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+ }
+ pending += (anew_done - curr_done) & ptr_mask;
+
+ if (pending != completed) {
+ do {
+ struct sfc_ef10_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ } while (++completed != pending);
+
+ txq->completed = completed;
+ }
+
+ sfc_ef10_ev_qclear(txq->evq_hw_ring, ptr_mask, old_read_ptr,
+ txq->evq_read_ptr);
+}
+
+static void
+sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+ efx_qword_t *edp)
+{
+ EFX_POPULATE_QWORD_4(*edp,
+ ESF_DZ_TX_KER_TYPE, 0,
+ ESF_DZ_TX_KER_CONT, !eop,
+ ESF_DZ_TX_KER_BYTE_CNT, size,
+ ESF_DZ_TX_KER_BUF_ADDR, addr);
+}
+
+static inline void
+sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
+ unsigned int pushed)
+{
+ efx_qword_t desc;
+ efx_oword_t oword;
+
+ /*
+ * This improves performance by pushing a TX descriptor at the same
+ * time as the doorbell. The descriptor must be added to the TXQ,
+ * so that it can be used if the hardware decides not to use the pushed
+ * descriptor.
+ */
+ desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
+ EFX_POPULATE_OWORD_3(oword,
+ ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
+ ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+ ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+ /* DMA sync to device is not required */
+
+ /*
+ * rte_io_wmb() guarantees that the STORE operations
+ * (i.e. Tx and event descriptor updates) that precede
+ * the rte_io_wmb() call are visible to the NIC before the STORE
+ * operations that follow it (i.e. doorbell write).
+ */
+ rte_io_wmb();
+
+ *(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
+}
+
+static unsigned int
+sfc_ef10_tx_pkt_descs_max(const struct rte_mbuf *m)
+{
+ unsigned int extra_descs_per_seg;
+ unsigned int extra_descs_per_pkt;
+
+ /*
+ * VLAN offload is not supported yet, so no extra descriptors
+ * are required for VLAN option descriptor.
+ */
+
+/** Maximum length of the mbuf segment data */
+#define SFC_MBUF_SEG_LEN_MAX UINT16_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->data_len) != 2);
+
+ /*
+ * Each segment is already counted once below. So, calculate
+ * how many extra DMA descriptors may be required per segment in
+ * the worst case because of the maximum DMA descriptor length limit.
+ * If the maximum segment length is less than or equal to the maximum
+ * DMA descriptor length, no extra DMA descriptors are required.
+ */
+ extra_descs_per_seg =
+ (SFC_MBUF_SEG_LEN_MAX - 1) / SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+/** Maximum length of the packet */
+#define SFC_MBUF_PKT_LEN_MAX UINT32_MAX
+ RTE_BUILD_BUG_ON(sizeof(m->pkt_len) != 4);
+
+ /*
+ * One more limitation on the maximum number of extra DMA descriptors
+ * comes from slicing the entire packet because of the DMA descriptor
+ * length limit, taking into account that there is at least one
+ * segment which is already counted below (hence the maximum packet
+ * length minus one is divided with rounding down).
+ * TSO is not supported yet, so the packet length is limited by the
+ * maximum PDU size.
+ */
+ extra_descs_per_pkt =
+ (RTE_MIN((unsigned int)EFX_MAC_PDU_MAX,
+ SFC_MBUF_PKT_LEN_MAX) - 1) /
+ SFC_EF10_TX_DMA_DESC_LEN_MAX;
+
+ return m->nb_segs + RTE_MIN(m->nb_segs * extra_descs_per_seg,
+ extra_descs_per_pkt);
+}
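+/*
+ * For instance, if SFC_EF10_TX_DMA_DESC_LEN_MAX were 16383 bytes, a single
+ * 65535-byte mbuf segment would require up to (65535 - 1) / 16383 == 4
+ * extra descriptors in addition to the one counted per segment above.
+ */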
+
+static uint16_t
+sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+
+ reap_done = (dma_desc_space < txq->free_thresh);
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ }
+
+ for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
+ pktp != pktp_end;
+ ++pktp) {
+ struct rte_mbuf *m_seg = *pktp;
+ unsigned int pkt_start = added;
+ uint32_t pkt_len;
+
+ if (likely(pktp + 1 != pktp_end))
+ rte_mbuf_prefetch_part1(pktp[1]);
+
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space) {
+ if (reap_done)
+ break;
+
+ /* Push already prepared descriptors before polling */
+ if (added != txq->added) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+ sfc_ef10_tx_reap(txq);
+ reap_done = true;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ if (sfc_ef10_tx_pkt_descs_max(m_seg) > dma_desc_space)
+ break;
+ }
+
+ pkt_len = m_seg->pkt_len;
+ do {
+ phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+ unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+
+ SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ pkt_len -= seg_len;
+
+ sfc_ef10_tx_qdesc_dma_create(seg_addr,
+ seg_len, (pkt_len == 0),
+ &txq->txq_hw_ring[added & ptr_mask]);
+ ++added;
+
+ } while ((m_seg = m_seg->next) != 0);
+
+ dma_desc_space -= (added - pkt_start);
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+static uint16_t
+sfc_ef10_simple_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+ unsigned int ptr_mask;
+ unsigned int added;
+ unsigned int dma_desc_space;
+ bool reap_done;
+ struct rte_mbuf **pktp;
+ struct rte_mbuf **pktp_end;
+
+ if (unlikely(txq->flags &
+ (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION)))
+ return 0;
+
+ ptr_mask = txq->ptr_mask;
+ added = txq->added;
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+
+ reap_done = (dma_desc_space < RTE_MAX(txq->free_thresh, nb_pkts));
+ if (reap_done) {
+ sfc_ef10_tx_reap(txq);
+ dma_desc_space = SFC_EF10_TXQ_LIMIT(ptr_mask + 1) -
+ (added - txq->completed);
+ }
+
+ pktp_end = &tx_pkts[MIN(nb_pkts, dma_desc_space)];
+ for (pktp = &tx_pkts[0]; pktp != pktp_end; ++pktp) {
+ struct rte_mbuf *pkt = *pktp;
+ unsigned int id = added & ptr_mask;
+
+ SFC_ASSERT(rte_pktmbuf_data_len(pkt) <=
+ SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+ sfc_ef10_tx_qdesc_dma_create(rte_mbuf_data_dma_addr(pkt),
+ rte_pktmbuf_data_len(pkt),
+ true, &txq->txq_hw_ring[id]);
+
+ txq->sw_ring[id].mbuf = pkt;
+
+ ++added;
+ }
+
+ if (likely(added != txq->added)) {
+ sfc_ef10_tx_qpush(txq, added, txq->added);
+ txq->added = added;
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_ef10_tx_reap(txq);
+#endif
+
+ return pktp - &tx_pkts[0];
+}
+
+
+static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
+static int
+sfc_ef10_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_ef10_txq *txq;
+ int rc;
+
+ rc = EINVAL;
+ if (info->txq_entries != info->evq_entries)
+ goto fail_bad_args;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->txq_hw_ring = info->txq_hw_ring;
+ txq->doorbell = (volatile uint8_t *)info->mem_bar +
+ ER_DZ_TX_DESC_UPD_REG_OFST +
+ info->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
+ txq->evq_hw_ring = info->evq_hw_ring;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_sw_ring_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+fail_bad_args:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
+static void
+sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
+static int
+sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->evq_read_ptr = evq_read_ptr;
+ txq->added = txq->completed = txq_desc_index;
+
+ txq->flags |= SFC_EF10_TXQ_STARTED;
+ txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
+static void
+sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
+
+ *evq_read_ptr = txq->evq_read_ptr;
+}
+
+static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
+static bool
+sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
+{
+ __rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+ SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
+
+ /*
+ * It is safe to ignore Tx event since we reap all mbufs on
+ * queue purge anyway.
+ */
+
+ return false;
+}
+
+static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
+static void
+sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ for (txds = 0; txds <= txq->ptr_mask; ++txds) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EF10_TXQ_STARTED;
+}
+
+struct sfc_dp_tx sfc_ef10_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = SFC_DP_HW_FW_CAP_EF10,
+ },
+ .features = SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .pkt_burst = sfc_ef10_xmit_pkts,
+};
+
+struct sfc_dp_tx sfc_ef10_simple_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EF10_SIMPLE,
+ .type = SFC_DP_TX,
+ },
+ .features = 0,
+ .qcreate = sfc_ef10_tx_qcreate,
+ .qdestroy = sfc_ef10_tx_qdestroy,
+ .qstart = sfc_ef10_tx_qstart,
+ .qtx_ev = sfc_ef10_tx_qtx_ev,
+ .qstop = sfc_ef10_tx_qstop,
+ .qreap = sfc_ef10_tx_qreap,
+ .pkt_burst = sfc_ef10_simple_xmit_pkts,
+};
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
new file mode 100644
index 00000000..4c9335f3
--- /dev/null
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -0,0 +1,1642 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_pci.h>
+#include <rte_errno.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_flow.h"
+#include "sfc_dp.h"
+#include "sfc_dp_rx.h"
+
+static struct sfc_dp_list sfc_dp_head =
+ TAILQ_HEAD_INITIALIZER(sfc_dp_head);
+
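+/*
+ * Report the management controller firmware version, e.g. a string like
+ * "6.2.1.1000" optionally followed by Rx/Tx DPCPU firmware IDs such as
+ * " rx1 tx1" (the exact values are illustrative, not taken from hardware).
+ */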
+static int
+sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ efx_nic_fw_info_t enfi;
+ int ret;
+ int rc;
+
+ /*
+ * The return value of the callback is likely supposed to be
+ * greater than or equal to 0; nevertheless, if an error
+ * occurs, it is desirable to pass it to the caller
+ */
+ if ((fw_version == NULL) || (fw_size == 0))
+ return -EINVAL;
+
+ rc = efx_nic_get_fw_version(sa->nic, &enfi);
+ if (rc != 0)
+ return -rc;
+
+ ret = snprintf(fw_version, fw_size,
+ "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
+ enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
+ enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
+ if (ret < 0)
+ return ret;
+
+ if (enfi.enfi_dpcpu_fw_ids_valid) {
+ size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
+ int ret_extra;
+
+ ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
+ fw_size - dpcpu_fw_ids_offset,
+ " rx%" PRIx16 " tx%" PRIx16,
+ enfi.enfi_rx_dpcpu_fw_id,
+ enfi.enfi_tx_dpcpu_fw_id);
+ if (ret_extra < 0)
+ return ret_extra;
+
+ ret += ret_extra;
+ }
+
+ if (fw_size < (size_t)(++ret))
+ return ret;
+ else
+ return 0;
+}
+
+static void
+sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+
+ sfc_log_init(sa, "entry");
+
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
+
+ /* Autonegotiation may be disabled */
+ dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_1G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_10G;
+ if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
+ dev_info->speed_capa |= ETH_LINK_SPEED_40G;
+
+ dev_info->max_rx_queues = sa->rxq_max;
+ dev_info->max_tx_queues = sa->txq_max;
+
+ /* By default packets are dropped if no descriptors are available */
+ dev_info->default_rxconf.rx_drop_en = 1;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
+ if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
+ !encp->enc_hw_tx_insert_vlan_enabled)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
+ else
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
+
+ if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
+ dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
+ dev_info->reta_size = EFX_RSS_TBL_SIZE;
+ dev_info->hash_key_size = SFC_RSS_KEY_SIZE;
+ dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
+ }
+#endif
+
+ if (sa->tso)
+ dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
+ dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
+ /* The RXQ hardware requires that the descriptor count is a power
+ * of 2, but rx_desc_lim cannot properly describe that constraint.
+ */
+ dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
+
+ dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
+ dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
+ /*
+ * The TXQ hardware requires that the descriptor count is a power
+ * of 2, but tx_desc_lim cannot properly describe that constraint
+ */
+ dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
+}
+
+static const uint32_t *
+sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ return sa->dp_rx->supported_ptypes_get();
+}
+
+static int
+sfc_dev_configure(struct rte_eth_dev *dev)
+{
+ struct rte_eth_dev_data *dev_data = dev->data;
+ struct sfc_adapter *sa = dev_data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
+ dev_data->nb_rx_queues, dev_data->nb_tx_queues);
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_CONFIGURED:
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ rc = sfc_configure(sa);
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u to configure",
+ sa->state);
+ rc = EINVAL;
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_start(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done %d", rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct rte_eth_link old_link;
+ struct rte_eth_link current_link;
+
+ sfc_log_init(sa, "entry");
+
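+ /*
+ * The link structure is updated without holding the adapter lock:
+ * since struct rte_eth_link fits into 64 bits, it is read and replaced
+ * atomically with compare-and-set, retrying if another context has
+ * modified it concurrently.
+ */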
+retry:
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
+ if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ *(uint64_t *)&old_link,
+ *(uint64_t *)&current_link))
+ goto retry;
+ } else if (wait_to_complete) {
+ efx_link_mode_t link_mode;
+
+ if (efx_port_poll(sa->nic, &link_mode) != 0)
+ link_mode = EFX_LINK_UNKNOWN;
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ *(uint64_t *)&old_link,
+ *(uint64_t *)&current_link))
+ goto retry;
+ } else {
+ sfc_ev_mgmt_qpoll(sa);
+ *(int64_t *)&current_link =
+ rte_atomic64_read((rte_atomic64_t *)dev_link);
+ }
+
+ if (old_link.link_status != current_link.link_status)
+ sfc_info(sa, "Link status is %s",
+ current_link.link_status ? "UP" : "DOWN");
+
+ return old_link.link_status == current_link.link_status ? 0 : -1;
+}
+
+static void
+sfc_dev_stop(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static int
+sfc_dev_set_link_up(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ rc = sfc_start(sa);
+ sfc_adapter_unlock(sa);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_link_down(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ sfc_stop(sa);
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static void
+sfc_dev_close(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+ switch (sa->state) {
+ case SFC_ADAPTER_STARTED:
+ sfc_stop(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_CONFIGURED:
+ sfc_close(sa);
+ SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
+ /* FALLTHROUGH */
+ case SFC_ADAPTER_INITIALIZED:
+ break;
+ default:
+ sfc_err(sa, "unexpected adapter state %u on close", sa->state);
+ break;
+ }
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+}
+
+static void
+sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
+ boolean_t enabled)
+{
+ struct sfc_port *port;
+ boolean_t *toggle;
+ struct sfc_adapter *sa = dev->data->dev_private;
+ boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
+ const char *desc = (allmulti) ? "all-multi" : "promiscuous";
+
+ sfc_adapter_lock(sa);
+
+ port = &sa->port;
+ toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
+
+ if (*toggle != enabled) {
+ *toggle = enabled;
+
+ if ((sa->state == SFC_ADAPTER_STARTED) &&
+ (sfc_set_rx_mode(sa) != 0)) {
+ *toggle = !(enabled);
+ sfc_warn(sa, "Failed to %s %s mode",
+ ((enabled) ? "enable" : "disable"), desc);
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_dev_promisc_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
+}
+
+static void
+sfc_dev_promisc_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
+}
+
+static void
+sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
+}
+
+static void
+sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
+{
+ sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
+}
+
+static int
+sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
+ rx_queue_id, nb_rx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
+ rx_conf, mb_pool);
+ if (rc != 0)
+ goto fail_rx_qinit;
+
+ dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_release(void *queue)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+ struct sfc_rxq *rxq;
+ struct sfc_adapter *sa;
+ unsigned int sw_index;
+
+ if (dp_rxq == NULL)
+ return;
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ sa = rxq->evq->sa;
+ sfc_adapter_lock(sa);
+
+ sw_index = sfc_rxq_sw_index(rxq);
+
+ sfc_log_init(sa, "RxQ=%u", sw_index);
+
+ sa->eth_dev->data->rx_queues[sw_index] = NULL;
+
+ sfc_rx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static int
+sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
+ tx_queue_id, nb_tx_desc, socket_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
+ if (rc != 0)
+ goto fail_tx_qinit;
+
+ dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qinit:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_tx_queue_release(void *queue)
+{
+ struct sfc_dp_txq *dp_txq = queue;
+ struct sfc_txq *txq;
+ unsigned int sw_index;
+ struct sfc_adapter *sa;
+
+ if (dp_txq == NULL)
+ return;
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ sw_index = sfc_txq_sw_index(txq);
+
+ SFC_ASSERT(txq->evq != NULL);
+ sa = txq->evq->sa;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
+ sa->eth_dev->data->tx_queues[sw_index] = NULL;
+
+ sfc_tx_qfini(sa, sw_index);
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ if (sfc_port_update_mac_stats(sa) != 0)
+ goto unlock;
+
+ mac_stats = port->mac_stats_buf;
+
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
+ EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
+ stats->ipackets =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
+ stats->opackets =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
+ stats->ibytes =
+ mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
+ stats->obytes =
+ mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
+ mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
+ stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
+ stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
+ stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
+ } else {
+ stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
+ stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
+ stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
+ stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
+ /*
+ * Take into account the stats which may be supported
+ * on EF10. If some stat is not supported by the current
+ * firmware variant or HW revision, it is guaranteed
+ * to be zero in mac_stats.
+ */
+ stats->imissed =
+ mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
+ mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
+ mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
+ mac_stats[EFX_MAC_PM_TRUNC_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_QBB] +
+ mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
+ mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
+ mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
+ stats->ierrors =
+ mac_stats[EFX_MAC_RX_FCS_ERRORS] +
+ mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
+ mac_stats[EFX_MAC_RX_JABBER_PKTS];
+ /* no oerrors counters supported on EF10 */
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+}
+
+static void
+sfc_stats_reset(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ /*
+ * The operation cannot be done if the port is not started; it
+ * will be scheduled to be done during the next port start
+ */
+ port->mac_stats_reset_pending = B_TRUE;
+ return;
+ }
+
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
+}
+
+static int
+sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint64_t *mac_stats;
+ int rc;
+ unsigned int i;
+ int nstats = 0;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+
+ rc = sfc_port_update_mac_stats(sa);
+ if (rc != 0) {
+ SFC_ASSERT(rc > 0);
+ nstats = -rc;
+ goto unlock;
+ }
+
+ mac_stats = port->mac_stats_buf;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats != NULL && nstats < (int)xstats_count) {
+ xstats[nstats].id = nstats;
+ xstats[nstats].value = mac_stats[i];
+ }
+ nstats++;
+ }
+ }
+
+unlock:
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return nstats;
+}
+
+static int
+sfc_xstats_get_names(struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int xstats_count)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int i;
+ unsigned int nstats = 0;
+
+ for (i = 0; i < EFX_MAC_NSTATS; ++i) {
+ if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
+ if (xstats_names != NULL && nstats < xstats_count)
+ strncpy(xstats_names[nstats].name,
+ efx_mac_stat_name(sa->nic, i),
+ sizeof(xstats_names[0].name));
+ nstats++;
+ }
+ }
+
+ return nstats;
+}
+
+static int
+sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int wanted_fc, link_fc;
+
+ memset(fc_conf, 0, sizeof(*fc_conf));
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED)
+ efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
+ else
+ link_fc = sa->port.flow_ctrl;
+
+ switch (link_fc) {
+ case 0:
+ fc_conf->mode = RTE_FC_NONE;
+ break;
+ case EFX_FCNTL_RESPOND:
+ fc_conf->mode = RTE_FC_RX_PAUSE;
+ break;
+ case EFX_FCNTL_GENERATE:
+ fc_conf->mode = RTE_FC_TX_PAUSE;
+ break;
+ case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
+ fc_conf->mode = RTE_FC_FULL;
+ break;
+ default:
+ sfc_err(sa, "%s: unexpected flow control value %#x",
+ __func__, link_fc);
+ }
+
+ fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ unsigned int fcntl;
+ int rc;
+
+ if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
+ fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
+ fc_conf->mac_ctrl_frame_fwd != 0) {
+ sfc_err(sa, "unsupported flow control settings specified");
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ switch (fc_conf->mode) {
+ case RTE_FC_NONE:
+ fcntl = 0;
+ break;
+ case RTE_FC_RX_PAUSE:
+ fcntl = EFX_FCNTL_RESPOND;
+ break;
+ case RTE_FC_TX_PAUSE:
+ fcntl = EFX_FCNTL_GENERATE;
+ break;
+ case RTE_FC_FULL:
+ fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ break;
+ default:
+ rc = EINVAL;
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+ }
+
+ port->flow_ctrl = fcntl;
+ port->flow_ctrl_autoneg = fc_conf->autoneg;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_mac_fcntl_set:
+ sfc_adapter_unlock(sa);
+fail_inval:
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ size_t pdu = EFX_MAC_PDU(mtu);
+ size_t old_pdu;
+ int rc;
+
+ sfc_log_init(sa, "mtu=%u", mtu);
+
+ rc = EINVAL;
+ if (pdu < EFX_MAC_PDU_MIN) {
+ sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MIN);
+ goto fail_inval;
+ }
+ if (pdu > EFX_MAC_PDU_MAX) {
+ sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
+ (unsigned int)mtu, (unsigned int)pdu,
+ EFX_MAC_PDU_MAX);
+ goto fail_inval;
+ }
+
+ sfc_adapter_lock(sa);
+
+ if (pdu != sa->port.pdu) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ sfc_stop(sa);
+
+ old_pdu = sa->port.pdu;
+ sa->port.pdu = pdu;
+ rc = sfc_start(sa);
+ if (rc != 0)
+ goto fail_start;
+ } else {
+ sa->port.pdu = pdu;
+ }
+ }
+
+ /*
+ * The driver does not use it, but other PMDs update the jumbo_frame
+ * flag and max_rx_pkt_len when the MTU is set.
+ */
+ dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_start:
+ sa->port.pdu = old_pdu;
+ if (sfc_start(sa) != 0)
+ sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
+ "PDU max size - port is stopped",
+ (unsigned int)pdu, (unsigned int)old_pdu);
+ sfc_adapter_unlock(sa);
+
+fail_inval:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static void
+sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc;
+
+ sfc_adapter_lock(sa);
+
+ if (sa->state != SFC_ADAPTER_STARTED) {
+ sfc_info(sa, "the port is not started");
+ sfc_info(sa, "the new MAC address will be set on port start");
+
+ goto unlock;
+ }
+
+ if (encp->enc_allow_set_mac_with_installed_filters) {
+ rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
+ if (rc != 0) {
+ sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
+ goto unlock;
+ }
+
+ /*
+ * Changing the MAC address by means of an MCDI request
+ * has no effect on received traffic, therefore
+ * we also need to update the unicast filters
+ */
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot set filter (rc = %u)", rc);
+ } else {
+ sfc_warn(sa, "cannot set MAC address with filters installed");
+ sfc_warn(sa, "adapter will be restarted to pick the new MAC");
+ sfc_warn(sa, "(some traffic may be dropped)");
+
+ /*
+ * Since setting the MAC address with filters installed is not
+ * allowed on the adapter, one simply needs to restart the adapter
+ * so that the new MAC address will be taken from the outer
+ * storage and set by means of the sfc_start() call
+ */
+ sfc_stop(sa);
+ rc = sfc_start(sa);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
+ }
+
+unlock:
+ sfc_adapter_unlock(sa);
+}
+
+
+static int
+sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_port *port = &sa->port;
+ uint8_t *mc_addrs = port->mcast_addrs;
+ int rc;
+ unsigned int i;
+
+ if (mc_addrs == NULL)
+ return -ENOBUFS;
+
+ if (nb_mc_addr > port->max_mcast_addrs) {
+ sfc_err(sa, "too many multicast addresses: %u > %u",
+ nb_mc_addr, port->max_mcast_addrs);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < nb_mc_addr; ++i) {
+ (void)rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ mc_addrs += EFX_MAC_ADDR_LEN;
+ }
+
+ port->nb_mcast_addrs = nb_mc_addr;
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return 0;
+
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static void
+sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(rx_queue_id < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[rx_queue_id];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq != NULL);
+
+ qinfo->mp = rxq->refill_mb_pool;
+ qinfo->conf.rx_free_thresh = rxq->refill_threshold;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
+ qinfo->scattered_rx = (rxq_info->type == EFX_RXQ_TYPE_SCATTER);
+ qinfo->nb_desc = rxq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static void
+sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_txq_info *txq_info;
+
+ sfc_adapter_lock(sa);
+
+ SFC_ASSERT(tx_queue_id < sa->txq_count);
+
+ txq_info = &sa->txq_info[tx_queue_id];
+ SFC_ASSERT(txq_info->txq != NULL);
+
+ memset(qinfo, 0, sizeof(*qinfo));
+
+ qinfo->conf.txq_flags = txq_info->txq->flags;
+ qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
+ qinfo->conf.tx_deferred_start = txq_info->deferred_start;
+ qinfo->nb_desc = txq_info->entries;
+
+ sfc_adapter_unlock(sa);
+}
+
+static uint32_t
+sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ return sfc_rx_qdesc_npending(sa, rx_queue_id);
+}
+
+static int
+sfc_rx_descriptor_done(void *queue, uint16_t offset)
+{
+ struct sfc_dp_rxq *dp_rxq = queue;
+
+ return sfc_rx_qdesc_done(dp_rxq, offset);
+}
+
+static int
+sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_rx_qstart(sa, rx_queue_id);
+ if (rc != 0)
+ goto fail_rx_qstart;
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_rx_qstart:
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "RxQ=%u", rx_queue_id);
+
+ sfc_adapter_lock(sa);
+ sfc_rx_qstop(sa, rx_queue_id);
+
+ sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ rc = EINVAL;
+ if (sa->state != SFC_ADAPTER_STARTED)
+ goto fail_not_started;
+
+ rc = sfc_tx_qstart(sa, tx_queue_id);
+ if (rc != 0)
+ goto fail_tx_qstart;
+
+ sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+
+fail_tx_qstart:
+fail_not_started:
+ sfc_adapter_unlock(sa);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "TxQ = %u", tx_queue_id);
+
+ sfc_adapter_lock(sa);
+
+ sfc_tx_qstop(sa, tx_queue_id);
+
+ sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
+
+ sfc_adapter_unlock(sa);
+ return 0;
+}
+
+#if EFSYS_OPT_RX_SCALE
+static int
+sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ return -ENOTSUP;
+
+ if (sa->rss_channels == 0)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ /*
+ * The mapping of hash configuration between RTE and EFX is not
+ * one-to-one; hence, a conversion is done here to derive the correct
+ * set of ETH_RSS flags which corresponds to the active EFX
+ * configuration stored locally in 'sfc_adapter' and kept up-to-date
+ */
+ rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
+ rss_conf->rss_key_len = SFC_RSS_KEY_SIZE;
+ if (rss_conf->rss_key != NULL)
+ rte_memcpy(rss_conf->rss_key, sa->rss_key, SFC_RSS_KEY_SIZE);
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int efx_hash_types;
+ int rc = 0;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (sa->rss_channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_key != NULL) &&
+ (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
+ sfc_err(sa, "RSS key size is wrong (should be %lu)",
+ sizeof(sa->rss_key));
+ return -EINVAL;
+ }
+
+ if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return -EINVAL;
+ }
+
+ sfc_adapter_lock(sa);
+
+ efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
+
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ efx_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ if (rss_conf->rss_key != NULL) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_rx_scale_key_set(sa->nic, rss_conf->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+ }
+
+ rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
+ }
+
+ sa->rss_hash_types = efx_hash_types;
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+
+fail_scale_key_set:
+ if (efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ sa->rss_hash_types, B_TRUE) != 0)
+ sfc_err(sa, "failed to restore RSS mode");
+
+fail_scale_mode_set:
+ sfc_adapter_unlock(sa);
+ return -rc;
+}
+
+static int
+sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int entry;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE)
+ return -ENOTSUP;
+
+ if (sa->rss_channels == 0)
+ return -EINVAL;
+
+ if (reta_size != EFX_RSS_TBL_SIZE)
+ return -EINVAL;
+
+ sfc_adapter_lock(sa);
+
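+ /*
+ * The RETA is reported in groups of RTE_RETA_GROUP_SIZE entries;
+ * only entries whose bit is set in the group mask are filled in.
+ */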
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp = entry / RTE_RETA_GROUP_SIZE;
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+
+ if ((reta_conf[grp].mask >> grp_idx) & 1)
+ reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return 0;
+}
+
+static int
+sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int *rss_tbl_new;
+ uint16_t entry;
+ int rc;
+
+ if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
+ sfc_err(sa, "RSS is not available");
+ return -ENOTSUP;
+ }
+
+ if (sa->rss_channels == 0) {
+ sfc_err(sa, "RSS is not configured");
+ return -EINVAL;
+ }
+
+ if (reta_size != EFX_RSS_TBL_SIZE) {
+ sfc_err(sa, "RETA size is wrong (should be %u)",
+ EFX_RSS_TBL_SIZE);
+ return -EINVAL;
+ }
+
+ rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
+ if (rss_tbl_new == NULL)
+ return -ENOMEM;
+
+ sfc_adapter_lock(sa);
+
+ rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
+
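+ /*
+ * Validate and apply the requested entries on a copy of the table so
+ * that the active RETA is left intact if any entry is out of range.
+ */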
+ for (entry = 0; entry < reta_size; entry++) {
+ int grp_idx = entry % RTE_RETA_GROUP_SIZE;
+ struct rte_eth_rss_reta_entry64 *grp;
+
+ grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
+
+ if (grp->mask & (1ull << grp_idx)) {
+ if (grp->reta[grp_idx] >= sa->rss_channels) {
+ rc = EINVAL;
+ goto bad_reta_entry;
+ }
+ rss_tbl_new[entry] = grp->reta[grp_idx];
+ }
+ }
+
+ rc = efx_rx_scale_tbl_set(sa->nic, rss_tbl_new, EFX_RSS_TBL_SIZE);
+ if (rc == 0)
+ rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
+
+bad_reta_entry:
+ sfc_adapter_unlock(sa);
+
+ rte_free(rss_tbl_new);
+
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+#endif
+
+static int
+sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc = ENOTSUP;
+
+ sfc_log_init(sa, "entry");
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NONE:
+ sfc_err(sa, "Global filters configuration not supported");
+ break;
+ case RTE_ETH_FILTER_MACVLAN:
+ sfc_err(sa, "MACVLAN filters not supported");
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ sfc_err(sa, "EtherType filters not supported");
+ break;
+ case RTE_ETH_FILTER_FLEXIBLE:
+ sfc_err(sa, "Flexible filters not supported");
+ break;
+ case RTE_ETH_FILTER_SYN:
+ sfc_err(sa, "SYN filters not supported");
+ break;
+ case RTE_ETH_FILTER_NTUPLE:
+ sfc_err(sa, "NTUPLE filters not supported");
+ break;
+ case RTE_ETH_FILTER_TUNNEL:
+ sfc_err(sa, "Tunnel filters not supported");
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ sfc_err(sa, "Flow Director filters not supported");
+ break;
+ case RTE_ETH_FILTER_HASH:
+ sfc_err(sa, "Hash filters not supported");
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET) {
+ rc = EINVAL;
+ } else {
+ *(const void **)arg = &sfc_flow_ops;
+ rc = 0;
+ }
+ break;
+ default:
+ sfc_err(sa, "Unknown filter type %u", filter_type);
+ break;
+ }
+
+ sfc_log_init(sa, "exit: %d", -rc);
+ SFC_ASSERT(rc >= 0);
+ return -rc;
+}
+
+static const struct eth_dev_ops sfc_eth_dev_ops = {
+ .dev_configure = sfc_dev_configure,
+ .dev_start = sfc_dev_start,
+ .dev_stop = sfc_dev_stop,
+ .dev_set_link_up = sfc_dev_set_link_up,
+ .dev_set_link_down = sfc_dev_set_link_down,
+ .dev_close = sfc_dev_close,
+ .promiscuous_enable = sfc_dev_promisc_enable,
+ .promiscuous_disable = sfc_dev_promisc_disable,
+ .allmulticast_enable = sfc_dev_allmulti_enable,
+ .allmulticast_disable = sfc_dev_allmulti_disable,
+ .link_update = sfc_dev_link_update,
+ .stats_get = sfc_stats_get,
+ .stats_reset = sfc_stats_reset,
+ .xstats_get = sfc_xstats_get,
+ .xstats_reset = sfc_stats_reset,
+ .xstats_get_names = sfc_xstats_get_names,
+ .dev_infos_get = sfc_dev_infos_get,
+ .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
+ .mtu_set = sfc_dev_set_mtu,
+ .rx_queue_start = sfc_rx_queue_start,
+ .rx_queue_stop = sfc_rx_queue_stop,
+ .tx_queue_start = sfc_tx_queue_start,
+ .tx_queue_stop = sfc_tx_queue_stop,
+ .rx_queue_setup = sfc_rx_queue_setup,
+ .rx_queue_release = sfc_rx_queue_release,
+ .rx_queue_count = sfc_rx_queue_count,
+ .rx_descriptor_done = sfc_rx_descriptor_done,
+ .tx_queue_setup = sfc_tx_queue_setup,
+ .tx_queue_release = sfc_tx_queue_release,
+ .flow_ctrl_get = sfc_flow_ctrl_get,
+ .flow_ctrl_set = sfc_flow_ctrl_set,
+ .mac_addr_set = sfc_mac_addr_set,
+#if EFSYS_OPT_RX_SCALE
+ .reta_update = sfc_dev_rss_reta_update,
+ .reta_query = sfc_dev_rss_reta_query,
+ .rss_hash_update = sfc_dev_rss_hash_update,
+ .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
+#endif
+ .filter_ctrl = sfc_dev_filter_ctrl,
+ .set_mc_addr_list = sfc_set_mc_addr_list,
+ .rxq_info_get = sfc_rx_queue_info_get,
+ .txq_info_get = sfc_tx_queue_info_get,
+ .fw_version_get = sfc_fw_version_get,
+};
+
+static int
+sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ unsigned int avail_caps = 0;
+ const char *rx_name = NULL;
+ const char *tx_name = NULL;
+ int rc;
+
+ switch (sa->family) {
+ case EFX_FAMILY_HUNTINGTON:
+ case EFX_FAMILY_MEDFORD:
+ avail_caps |= SFC_DP_HW_FW_CAP_EF10;
+ break;
+ default:
+ break;
+ }
+
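+ /*
+ * The Rx/Tx datapath may be requested explicitly via the corresponding
+ * device argument; otherwise the first registered datapath whose HW/FW
+ * capability requirements are satisfied is selected.
+ */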
+ rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
+ sfc_kvarg_string_handler, &rx_name);
+ if (rc != 0)
+ goto fail_kvarg_rx_datapath;
+
+ if (rx_name != NULL) {
+ sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath %s not found", rx_name);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Rx datapath %s",
+ rx_name);
+ rc = EINVAL;
+ goto fail_dp_rx;
+ }
+ } else {
+ sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_rx == NULL) {
+ sfc_err(sa, "Rx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_rx;
+ }
+ }
+
+ sfc_info(sa, "use %s Rx datapath", sa->dp_rx->dp.name);
+
+ dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
+ sfc_kvarg_string_handler, &tx_name);
+ if (rc != 0)
+ goto fail_kvarg_tx_datapath;
+
+ if (tx_name != NULL) {
+ sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath %s not found", tx_name);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
+ sfc_err(sa,
+ "Insufficient Hw/FW capabilities to use Tx datapath %s",
+ tx_name);
+ rc = EINVAL;
+ goto fail_dp_tx;
+ }
+ } else {
+ sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
+ if (sa->dp_tx == NULL) {
+ sfc_err(sa, "Tx datapath by caps %#x not found",
+ avail_caps);
+ rc = ENOENT;
+ goto fail_dp_tx;
+ }
+ }
+
+ sfc_info(sa, "use %s Tx datapath", sa->dp_tx->dp.name);
+
+ dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
+
+ dev->dev_ops = &sfc_eth_dev_ops;
+
+ return 0;
+
+fail_dp_tx:
+fail_kvarg_tx_datapath:
+fail_dp_rx:
+fail_kvarg_rx_datapath:
+ return rc;
+}
+
+static void
+sfc_register_dp(void)
+{
+ /* Register once */
+ if (TAILQ_EMPTY(&sfc_dp_head)) {
+ /* Prefer EF10 datapath */
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
+
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
+ sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
+ }
+}
+
+static int
+sfc_eth_dev_init(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
+ int rc;
+ const efx_nic_cfg_t *encp;
+ const struct ether_addr *from;
+
+ sfc_register_dp();
+
+ /* Required for logging */
+ sa->eth_dev = dev;
+
+ /* Copy PCI device info to the dev->data */
+ rte_eth_copy_pci_info(dev, pci_dev);
+
+ rc = sfc_kvargs_parse(sa);
+ if (rc != 0)
+ goto fail_kvargs_parse;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
+ sfc_kvarg_bool_handler, &sa->debug_init);
+ if (rc != 0)
+ goto fail_kvarg_debug_init;
+
+ sfc_log_init(sa, "entry");
+
+ dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
+ if (dev->data->mac_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mac_addrs;
+ }
+
+ sfc_adapter_lock_init(sa);
+ sfc_adapter_lock(sa);
+
+ sfc_log_init(sa, "probing");
+ rc = sfc_probe(sa);
+ if (rc != 0)
+ goto fail_probe;
+
+ sfc_log_init(sa, "set device ops");
+ rc = sfc_eth_dev_set_ops(dev);
+ if (rc != 0)
+ goto fail_set_ops;
+
+ sfc_log_init(sa, "attaching");
+ rc = sfc_attach(sa);
+ if (rc != 0)
+ goto fail_attach;
+
+ encp = efx_nic_cfg_get(sa->nic);
+
+ /*
+ * The arguments are in reverse order compared to the Linux kernel:
+ * copy from the NIC configuration to the Ethernet device data.
+ */
+ from = (const struct ether_addr *)(encp->enc_mac_addr);
+ ether_addr_copy(from, &dev->data->mac_addrs[0]);
+
+ sfc_adapter_unlock(sa);
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_attach:
+fail_set_ops:
+ sfc_unprobe(sa);
+
+fail_probe:
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+fail_mac_addrs:
+fail_kvarg_debug_init:
+ sfc_kvargs_cleanup(sa);
+
+fail_kvargs_parse:
+ sfc_log_init(sa, "failed %d", rc);
+ SFC_ASSERT(rc > 0);
+ return -rc;
+}
+
+static int
+sfc_eth_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_adapter_lock(sa);
+
+ sfc_detach(sa);
+ sfc_unprobe(sa);
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+
+ sfc_kvargs_cleanup(sa);
+
+ sfc_adapter_unlock(sa);
+ sfc_adapter_lock_fini(sa);
+
+ sfc_log_init(sa, "done");
+
+ /* Required for logging, so cleanup last */
+ sa->eth_dev = NULL;
+ return 0;
+}
+
+static const struct rte_pci_id pci_id_sfc_efx_map[] = {
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
+ { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
+ { .vendor_id = 0 /* sentinel */ }
+};
+
+static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct sfc_adapter), sfc_eth_dev_init);
+}
+
+static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
+}
+
+static struct rte_pci_driver sfc_efx_pmd = {
+ .id_table = pci_id_sfc_efx_map,
+ .drv_flags =
+ RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_NEED_MAPPING,
+ .probe = sfc_eth_dev_pci_probe,
+ .remove = sfc_eth_dev_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
+ SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
+ SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
+ SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
+ SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
+ SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
new file mode 100644
index 00000000..160d39f9
--- /dev/null
+++ b/drivers/net/sfc/sfc_ev.c
@@ -0,0 +1,921 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_alarm.h>
+#include <rte_branch_prediction.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_tx.h"
+#include "sfc_kvargs.h"
+
+
+/* Initial delay when waiting for event queue init complete event */
+#define SFC_EVQ_INIT_BACKOFF_START_US (1)
+/* Maximum delay between event queue polling attempts */
+#define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
+/* Event queue init approx timeout */
+#define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
+
+/* Management event queue polling period in microseconds */
+#define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
+
+static const char *
+sfc_evq_type2str(enum sfc_evq_type type)
+{
+ switch (type) {
+ case SFC_EVQ_TYPE_MGMT:
+ return "mgmt-evq";
+ case SFC_EVQ_TYPE_RX:
+ return "rx-evq";
+ case SFC_EVQ_TYPE_TX:
+ return "tx-evq";
+ default:
+ SFC_ASSERT(B_FALSE);
+ return NULL;
+ }
+}
+
+static boolean_t
+sfc_ev_initialized(void *arg)
+{
+ struct sfc_evq *evq = arg;
+
+ /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
+ evq->init_state == SFC_EVQ_STARTED);
+
+ evq->init_state = SFC_EVQ_STARTED;
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa,
+ "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
+ evq->evq_index, label, id, size, flags);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ uint32_t size, uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_efx_rxq *rxq;
+ unsigned int stop;
+ unsigned int pending_id;
+ unsigned int delta;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ if (unlikely(evq->exception))
+ goto done;
+
+ rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);
+
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->evq == evq);
+ SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
+
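+ /*
+ * The event carries the index of the last completed descriptor.
+ * Compute how many descriptors have been completed since the current
+ * pending position, taking ring wrap-around into account.
+ */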
+ stop = (id + 1) & rxq->ptr_mask;
+ pending_id = rxq->pending & rxq->ptr_mask;
+ delta = (stop >= pending_id) ? (stop - pending_id) :
+ (rxq->ptr_mask + 1 - pending_id + stop);
+
+ if (delta == 0) {
+ /*
+ * An Rx event with no new descriptors done and zero length
+ * is used to abort a scattered packet when there is no room
+ * for the tail.
+ */
+ if (unlikely(size != 0)) {
+ evq->exception = B_TRUE;
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u invalid RX abort "
+ "(id=%#x size=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, size, flags);
+ goto done;
+ }
+
+ /* Add discard flag to the first fragment */
+ rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
+ /* Remove continue flag from the last fragment */
+ rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
+ } else if (unlikely(delta > rxq->batch_max)) {
+ evq->exception = B_TRUE;
+
+ sfc_err(evq->sa,
+ "EVQ %u RxQ %u completion out of order "
+ "(id=%#x delta=%u flags=%#x); needs restart",
+ evq->evq_index, rxq->dp.dpq.queue_id,
+ id, delta, flags);
+
+ goto done;
+ }
+
+ for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
+ rxd = &rxq->sw_desc[i];
+
+ rxd->flags = flags;
+
+ SFC_ASSERT(size < (1 << 16));
+ rxd->size = (uint16_t)size;
+ }
+
+ rxq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
+ __rte_unused uint32_t size, __rte_unused uint16_t flags)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
+ return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
+}
+
+static boolean_t
+sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
+ evq->evq_index, label, id);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_efx_txq *txq;
+ unsigned int stop;
+ unsigned int delta;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq->evq == evq);
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
+ goto done;
+
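+ /*
+ * Count descriptors completed since the current pending position,
+ * taking ring wrap-around into account.
+ */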
+ stop = (id + 1) & txq->ptr_mask;
+ id = txq->pending & txq->ptr_mask;
+
+ delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
+
+ txq->pending += delta;
+
+done:
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
+ return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+}
+
+static boolean_t
+sfc_ev_exception(void *arg, __rte_unused uint32_t code,
+ __rte_unused uint32_t data)
+{
+ struct sfc_evq *evq = arg;
+
+ if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
+ return B_FALSE;
+
+ evq->exception = B_TRUE;
+ sfc_warn(evq->sa,
+ "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
+ " needs recovery",
+ (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
+ (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
+ (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
+ (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
+ (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
+ (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
+ (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
+ "UNKNOWN",
+ code, data, evq->evq_index);
+
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_done(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
+ evq->evq_index, rxq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_rxq *rxq;
+
+ dp_rxq = evq->dp_rxq;
+ SFC_ASSERT(dp_rxq != NULL);
+
+ rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+ SFC_ASSERT(rxq != NULL);
+ SFC_ASSERT(rxq->hw_index == rxq_hw_index);
+ SFC_ASSERT(rxq->evq == evq);
+ sfc_rx_qflush_failed(rxq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
+ evq->evq_index, txq_hw_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_dp_txq *dp_txq;
+ struct sfc_txq *txq;
+
+ dp_txq = evq->dp_txq;
+ SFC_ASSERT(dp_txq != NULL);
+
+ txq = sfc_txq_by_dp_txq(dp_txq);
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->hw_index == txq_hw_index);
+ SFC_ASSERT(txq->evq == evq);
+ sfc_tx_qflush_done(txq);
+
+ return B_FALSE;
+}
+
+static boolean_t
+sfc_ev_software(void *arg, uint16_t magic)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
+ evq->evq_index, magic);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_sram(void *arg, uint32_t code)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
+ evq->evq_index, code);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_wake_up(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_timer(void *arg, uint32_t index)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
+ evq->evq_index, index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+
+ sfc_err(evq->sa, "EVQ %u unexpected link change event",
+ evq->evq_index);
+ return B_TRUE;
+}
+
+static boolean_t
+sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
+{
+ struct sfc_evq *evq = arg;
+ struct sfc_adapter *sa = evq->sa;
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ struct rte_eth_link new_link;
+ uint64_t new_link_u64;
+ uint64_t old_link_u64;
+
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+
+ sfc_port_link_mode_to_info(link_mode, &new_link);
+
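+ /*
+ * The link status is updated as a single 64-bit value using a
+ * compare-and-set loop so that readers never observe a partially
+ * updated rte_eth_link structure.
+ */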
+ new_link_u64 = *(uint64_t *)&new_link;
+ do {
+ old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
+ if (old_link_u64 == new_link_u64)
+ break;
+
+ if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
+ old_link_u64, new_link_u64)) {
+ evq->sa->port.lsc_seq++;
+ break;
+ }
+ } while (B_TRUE);
+
+ return B_FALSE;
+}
+
+static const efx_ev_callbacks_t sfc_ev_callbacks = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_efx_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_dp_rx,
+ .eec_tx = sfc_ev_nop_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
+ .eec_initialized = sfc_ev_initialized,
+ .eec_rx = sfc_ev_nop_rx,
+ .eec_tx = sfc_ev_dp_tx,
+ .eec_exception = sfc_ev_exception,
+ .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
+ .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
+ .eec_txq_flush_done = sfc_ev_txq_flush_done,
+ .eec_software = sfc_ev_software,
+ .eec_sram = sfc_ev_sram,
+ .eec_wake_up = sfc_ev_wake_up,
+ .eec_timer = sfc_ev_timer,
+ .eec_link_change = sfc_ev_nop_link_change,
+};
+
+
+void
+sfc_ev_qpoll(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
+ evq->init_state == SFC_EVQ_STARTING);
+
+ /* Synchronizing the DMA memory for reading is not required */
+
+ efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);
+
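+ /*
+ * On exception, try to recover by restarting the queue served by this
+ * event queue; trylock is used so that polling never blocks on the
+ * adapter lock.
+ */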
+ if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
+ struct sfc_adapter *sa = evq->sa;
+ int rc;
+
+ if (evq->dp_rxq != NULL) {
+ unsigned int rxq_sw_index;
+
+ rxq_sw_index = evq->dp_rxq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart RxQ %u because of exception on its EvQ %u",
+ rxq_sw_index, evq->evq_index);
+
+ sfc_rx_qstop(sa, rxq_sw_index);
+ rc = sfc_rx_qstart(sa, rxq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart RxQ %u",
+ rxq_sw_index);
+ }
+
+ if (evq->dp_txq != NULL) {
+ unsigned int txq_sw_index;
+
+ txq_sw_index = evq->dp_txq->dpq.queue_id;
+
+ sfc_warn(sa,
+ "restart TxQ %u because of exception on its EvQ %u",
+ txq_sw_index, evq->evq_index);
+
+ sfc_tx_qstop(sa, txq_sw_index);
+ rc = sfc_tx_qstart(sa, txq_sw_index);
+ if (rc != 0)
+ sfc_err(sa, "cannot restart TxQ %u",
+ txq_sw_index);
+ }
+
+ if (evq->exception)
+ sfc_panic(sa, "unrecoverable exception on EvQ %u",
+ evq->evq_index);
+
+ sfc_adapter_unlock(sa);
+ }
+
+ /* Poll-mode driver does not re-prime the event queue for interrupts */
+}
+
+void
+sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
+{
+ if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
+ struct sfc_evq *mgmt_evq = sa->mgmt_evq;
+
+ if (mgmt_evq->init_state == SFC_EVQ_STARTED)
+ sfc_ev_qpoll(mgmt_evq);
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ }
+}
+
+int
+sfc_ev_qprime(struct sfc_evq *evq)
+{
+ SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
+ return efx_ev_qprime(evq->common, evq->read_ptr);
+}
+
+/* Event queue HW index allocation scheme is described in sfc_ev.h. */
+int
+sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
+{
+ struct sfc_adapter *sa = evq->sa;
+ efsys_mem_t *esmp;
+ uint32_t evq_flags = sa->evq_flags;
+ unsigned int total_delay_us;
+ unsigned int delay_us;
+ int rc;
+
+ sfc_log_init(sa, "hw_index=%u", hw_index);
+
+ esmp = &evq->mem;
+
+ evq->evq_index = hw_index;
+
+ /* Clear all events */
+ (void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));
+
+ if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
+ else
+ evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
+
+ /* Create the common code event queue */
+ rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
+ 0 /* unused on EF10 */, 0, evq_flags,
+ &evq->common);
+ if (rc != 0)
+ goto fail_ev_qcreate;
+
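+ /*
+ * An event queue serves at most one Rx queue or one Tx queue (the
+ * management queue serves neither); pick the callback set matching
+ * the queue type and the datapath implementation bound to it.
+ */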
+ SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
+ if (evq->dp_rxq != NULL) {
+ if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_rx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_rx;
+ } else if (evq->dp_txq != NULL) {
+ if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
+ evq->callbacks = &sfc_ev_callbacks_efx_tx;
+ else
+ evq->callbacks = &sfc_ev_callbacks_dp_tx;
+ } else {
+ evq->callbacks = &sfc_ev_callbacks;
+ }
+
+ evq->init_state = SFC_EVQ_STARTING;
+
+ /* Wait for the initialization event */
+ total_delay_us = 0;
+ delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
+ do {
+ (void)sfc_ev_qpoll(evq);
+
+ /* Check whether the initialization complete indication
+ * has been posted by the hardware.
+ */
+ if (evq->init_state == SFC_EVQ_STARTED)
+ goto done;
+
+ /* Give event queue some time to init */
+ rte_delay_us(delay_us);
+
+ total_delay_us += delay_us;
+
+ /* Exponential backoff */
+ delay_us *= 2;
+ if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
+ delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
+
+ } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
+
+ rc = ETIMEDOUT;
+ goto fail_timedout;
+
+done:
+ return 0;
+
+fail_timedout:
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ efx_ev_qdestroy(evq->common);
+
+fail_ev_qcreate:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qstop(struct sfc_evq *evq)
+{
+ if (evq == NULL)
+ return;
+
+ sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
+
+ if (evq->init_state != SFC_EVQ_STARTED)
+ return;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+ evq->callbacks = NULL;
+ evq->read_ptr = 0;
+ evq->exception = B_FALSE;
+
+ efx_ev_qdestroy(evq->common);
+
+ evq->evq_index = 0;
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll(void *arg)
+{
+ struct sfc_adapter *sa = arg;
+ int rc;
+
+ sfc_ev_mgmt_qpoll(sa);
+
+ rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
+ sfc_ev_mgmt_periodic_qpoll, sa);
+ if (rc == -ENOTSUP) {
+ sfc_warn(sa, "alarms are not supported");
+ sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
+ } else if (rc != 0) {
+ sfc_err(sa,
+ "cannot rearm management EVQ polling alarm (rc=%d)",
+ rc);
+ }
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
+{
+ sfc_ev_mgmt_periodic_qpoll(sa);
+}
+
+static void
+sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
+{
+ rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
+}
+
+int
+sfc_ev_start(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_ev_init(sa->nic);
+ if (rc != 0)
+ goto fail_ev_init;
+
+ /* Start management EVQ used for global events */
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
+ if (rc != 0)
+ goto fail_mgmt_evq_start;
+
+ if (sa->intr.lsc_intr) {
+ rc = sfc_ev_qprime(sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_evq0_prime;
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ /*
+ * Start management EVQ polling. If interrupts are disabled
+ * (not used), it is required to process link status changes
+ * and other device-level events to avoid an unrecoverable
+ * error caused by event queue overflow.
+ */
+ sfc_ev_mgmt_periodic_qpoll_start(sa);
+
+ /*
+ * Rx/Tx event queues are started/stopped when corresponding
+ * Rx/Tx queue is started/stopped.
+ */
+
+ return 0;
+
+fail_evq0_prime:
+ sfc_ev_qstop(sa->mgmt_evq);
+
+fail_mgmt_evq_start:
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+ efx_ev_fini(sa->nic);
+
+fail_ev_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_mgmt_periodic_qpoll_stop(sa);
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+ sfc_ev_qstop(sa->mgmt_evq);
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+
+ efx_ev_fini(sa->nic);
+}
+
+int
+sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp)
+{
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "type=%s type_index=%u",
+ sfc_evq_type2str(type), type_index);
+
+ SFC_ASSERT(rte_is_power_of_2(entries));
+
+ rc = ENOMEM;
+ evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (evq == NULL)
+ goto fail_evq_alloc;
+
+ evq->sa = sa;
+ evq->type = type;
+ evq->entries = entries;
+
+ /* Allocate DMA space */
+ rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
+ EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ evq->init_state = SFC_EVQ_INITIALIZED;
+
+ sa->evq_count++;
+
+ *evqp = evq;
+
+ return 0;
+
+fail_dma_alloc:
+ rte_free(evq);
+
+fail_evq_alloc:
+
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_qfini(struct sfc_evq *evq)
+{
+ struct sfc_adapter *sa = evq->sa;
+
+ SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
+
+ sfc_dma_free(sa, &evq->mem);
+
+ rte_free(evq);
+
+ SFC_ASSERT(sa->evq_count > 0);
+ sa->evq_count--;
+}
+
+static int
+sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ uint64_t *value = opaque;
+
+ if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
+ else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
+ *value = EFX_EVQ_FLAGS_TYPE_AUTO;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_ev_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
+ rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
+ sfc_kvarg_perf_profile_handler,
+ &sa->evq_flags);
+ if (rc != 0) {
+ sfc_err(sa, "invalid %s parameter value",
+ SFC_KVARG_PERF_PROFILE);
+ goto fail_kvarg_perf_profile;
+ }
+
+ sa->mgmt_evq_index = 0;
+ rte_spinlock_init(&sa->mgmt_evq_lock);
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
+ sa->socket_id, &sa->mgmt_evq);
+ if (rc != 0)
+ goto fail_mgmt_evq_init;
+
+ /*
+ * Rx/Tx event queues are created/destroyed when corresponding
+ * Rx/Tx queue is created/destroyed.
+ */
+
+ return 0;
+
+fail_mgmt_evq_init:
+
+fail_kvarg_perf_profile:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_ev_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_ev_qfini(sa->mgmt_evq);
+
+ if (sa->evq_count != 0)
+ sfc_err(sa, "%u EvQs are not destroyed before detach",
+ sa->evq_count);
+}
diff --git a/drivers/net/sfc/sfc_ev.h b/drivers/net/sfc/sfc_ev.h
new file mode 100644
index 00000000..065defe0
--- /dev/null
+++ b/drivers/net/sfc/sfc_ev.h
@@ -0,0 +1,129 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_EV_H_
+#define _SFC_EV_H_
+
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Number of entries in the management event queue */
+#define SFC_MGMT_EVQ_ENTRIES (EFX_EVQ_MINNEVS)
+
+struct sfc_adapter;
+struct sfc_dp_rxq;
+struct sfc_dp_txq;
+
+enum sfc_evq_state {
+ SFC_EVQ_UNINITIALIZED = 0,
+ SFC_EVQ_INITIALIZED,
+ SFC_EVQ_STARTING,
+ SFC_EVQ_STARTED,
+
+ SFC_EVQ_NSTATES
+};
+
+enum sfc_evq_type {
+ SFC_EVQ_TYPE_MGMT = 0,
+ SFC_EVQ_TYPE_RX,
+ SFC_EVQ_TYPE_TX,
+
+ SFC_EVQ_NTYPES
+};
+
+struct sfc_evq {
+ /* Used on datapath */
+ efx_evq_t *common;
+ const efx_ev_callbacks_t *callbacks;
+ unsigned int read_ptr;
+ boolean_t exception;
+ efsys_mem_t mem;
+ struct sfc_dp_rxq *dp_rxq;
+ struct sfc_dp_txq *dp_txq;
+
+ /* Not used on datapath */
+ struct sfc_adapter *sa;
+ unsigned int evq_index;
+ enum sfc_evq_state init_state;
+ enum sfc_evq_type type;
+ unsigned int entries;
+};
+
+/*
+ * The functions below define the mapping between event queues and
+ * transmit/receive queues.
+ * A dedicated event queue is allocated for management and for each
+ * Rx and each Tx queue.
+ * Event queue 0 is used for management events.
+ * Rx event queues from 1 to the number of Rx queues follow the
+ * management event queue.
+ * Tx event queues follow the Rx event queues.
+ */
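+/*
+ * For example, with 4 Rx queues configured, Rx queue 2 is served by
+ * event queue 3 and Tx queue 0 is served by event queue 5.
+ */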
+
+static inline unsigned int
+sfc_evq_index_by_rxq_sw_index(__rte_unused struct sfc_adapter *sa,
+ unsigned int rxq_sw_index)
+{
+ return 1 + rxq_sw_index;
+}
+
+static inline unsigned int
+sfc_evq_index_by_txq_sw_index(struct sfc_adapter *sa, unsigned int txq_sw_index)
+{
+ return 1 + sa->eth_dev->data->nb_rx_queues + txq_sw_index;
+}
+
+int sfc_ev_attach(struct sfc_adapter *sa);
+void sfc_ev_detach(struct sfc_adapter *sa);
+int sfc_ev_start(struct sfc_adapter *sa);
+void sfc_ev_stop(struct sfc_adapter *sa);
+
+int sfc_ev_qinit(struct sfc_adapter *sa,
+ enum sfc_evq_type type, unsigned int type_index,
+ unsigned int entries, int socket_id, struct sfc_evq **evqp);
+void sfc_ev_qfini(struct sfc_evq *evq);
+int sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index);
+void sfc_ev_qstop(struct sfc_evq *evq);
+
+int sfc_ev_qprime(struct sfc_evq *evq);
+void sfc_ev_qpoll(struct sfc_evq *evq);
+
+void sfc_ev_mgmt_qpoll(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_EV_H_ */
diff --git a/drivers/net/sfc/sfc_filter.c b/drivers/net/sfc/sfc_filter.c
new file mode 100644
index 00000000..58b74de7
--- /dev/null
+++ b/drivers/net/sfc/sfc_filter.c
@@ -0,0 +1,137 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+
+boolean_t
+sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t i;
+
+ for (i = 0; i < filter->supported_match_num; ++i) {
+ if (match == filter->supported_match[i])
+ return B_TRUE;
+ }
+
+ return B_FALSE;
+}
+
+static int
+sfc_filter_cache_match_supported(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+ size_t num = filter->supported_match_num;
+ uint32_t *buf = filter->supported_match;
+ unsigned int retry;
+ int rc;
+
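+ /*
+ * The list of supported filter matches is fetched in at most two
+ * passes: if the first query reports ENOSPC, the buffer is grown to
+ * the size indicated by the query and the request is retried once.
+ */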
+ /* Just a guess of possibly sufficient entries */
+ if (num == 0)
+ num = 16;
+
+ for (retry = 0; retry < 2; ++retry) {
+ if (num != filter->supported_match_num) {
+ rc = ENOMEM;
+ buf = rte_realloc(buf, num * sizeof(*buf), 0);
+ if (buf == NULL)
+ goto fail_realloc;
+ }
+
+ rc = efx_filter_supported_filters(sa->nic, buf, num, &num);
+ if (rc == 0) {
+ filter->supported_match_num = num;
+ filter->supported_match = buf;
+
+ return 0;
+ } else if (rc != ENOSPC) {
+ goto fail_efx_filter_supported_filters;
+ }
+ }
+
+ SFC_ASSERT(rc == ENOSPC);
+
+fail_efx_filter_supported_filters:
+fail_realloc:
+ /* Original pointer is not freed by rte_realloc() on failure */
+ rte_free(buf);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+ return rc;
+}
+
+int
+sfc_filter_attach(struct sfc_adapter *sa)
+{
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ rc = sfc_filter_cache_match_supported(sa);
+ if (rc != 0)
+ goto fail_cache_match_supported;
+
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+
+ return 0;
+
+fail_cache_match_supported:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_filter_detach(struct sfc_adapter *sa)
+{
+ struct sfc_filter *filter = &sa->filter;
+
+ sfc_log_init(sa, "entry");
+
+ rte_free(filter->supported_match);
+ filter->supported_match = NULL;
+ filter->supported_match_num = 0;
+
+ sfc_log_init(sa, "done");
+}
diff --git a/drivers/net/sfc/sfc_filter.h b/drivers/net/sfc/sfc_filter.h
new file mode 100644
index 00000000..d884f37d
--- /dev/null
+++ b/drivers/net/sfc/sfc_filter.h
@@ -0,0 +1,62 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_FILTER_H
+#define _SFC_FILTER_H
+
+#include "efx.h"
+
+#include "sfc_flow.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_filter {
+ /** Number of elements in the supported_match array */
+ size_t supported_match_num;
+ /** Driver cache of supported filter match masks */
+ uint32_t *supported_match;
+ /** List of flow rules */
+ struct sfc_flow_list flow_list;
+};
+
+struct sfc_adapter;
+
+int sfc_filter_attach(struct sfc_adapter *sa);
+void sfc_filter_detach(struct sfc_adapter *sa);
+
+boolean_t sfc_filter_is_match_supported(struct sfc_adapter *sa, uint32_t match);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FILTER_H */
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
new file mode 100644
index 00000000..c3ea43a6
--- /dev/null
+++ b/drivers/net/sfc/sfc_flow.c
@@ -0,0 +1,1175 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_tailq.h>
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_eth_ctrl.h>
+#include <rte_ether.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_rx.h"
+#include "sfc_filter.h"
+#include "sfc_flow.h"
+#include "sfc_log.h"
+
+/*
+ * Currently the flow API is implemented in such a manner that each
+ * flow rule is converted to a hardware filter.
+ * All elements of a flow rule (attributes, pattern items, actions)
+ * correspond to one or more fields in the efx_filter_spec_s structure
+ * that describes the hardware filter.
+ */
+
+enum sfc_flow_item_layers {
+ SFC_FLOW_ITEM_ANY_LAYER,
+ SFC_FLOW_ITEM_START_LAYER,
+ SFC_FLOW_ITEM_L2,
+ SFC_FLOW_ITEM_L3,
+ SFC_FLOW_ITEM_L4,
+};
+
+typedef int (sfc_flow_item_parse)(const struct rte_flow_item *item,
+ efx_filter_spec_t *spec,
+ struct rte_flow_error *error);
+
+struct sfc_flow_item {
+ enum rte_flow_item_type type; /* Type of item */
+ enum sfc_flow_item_layers layer; /* Layer of item */
+ enum sfc_flow_item_layers prev_layer; /* Previous layer of item */
+ sfc_flow_item_parse *parse; /* Parsing function */
+};
+
+static sfc_flow_item_parse sfc_flow_parse_void;
+static sfc_flow_item_parse sfc_flow_parse_eth;
+static sfc_flow_item_parse sfc_flow_parse_vlan;
+static sfc_flow_item_parse sfc_flow_parse_ipv4;
+static sfc_flow_item_parse sfc_flow_parse_ipv6;
+static sfc_flow_item_parse sfc_flow_parse_tcp;
+static sfc_flow_item_parse sfc_flow_parse_udp;
+
+static boolean_t
+sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
+{
+ uint8_t sum = 0;
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ sum |= buf[i];
+
+ return (sum == 0) ? B_TRUE : B_FALSE;
+}
+
+/*
+ * Validate the item and prepare the spec and mask structures for parsing
+ */
+static int
+sfc_flow_parse_init(const struct rte_flow_item *item,
+ const void **spec_ptr,
+ const void **mask_ptr,
+ const void *supp_mask,
+ const void *def_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ const uint8_t *spec;
+ const uint8_t *mask;
+ const uint8_t *last;
+ uint8_t match;
+ uint8_t supp;
+ unsigned int i;
+
+ if (item == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "NULL item");
+ return -rte_errno;
+ }
+
+ if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Mask or last is set without spec");
+ return -rte_errno;
+ }
+
+ /*
+ * If "mask" is not set, default mask is used,
+ * but if default mask is NULL, "mask" should be set
+ */
+ if (item->mask == NULL) {
+ if (def_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Mask should be specified");
+ return -rte_errno;
+ }
+
+ mask = (const uint8_t *)def_mask;
+ } else {
+ mask = (const uint8_t *)item->mask;
+ }
+
+ spec = (const uint8_t *)item->spec;
+ last = (const uint8_t *)item->last;
+
+ if (spec == NULL)
+ goto exit;
+
+ /*
+ * If field values in "last" are either 0 or equal to the corresponding
+ * values in "spec" then they are ignored
+ */
+ if (last != NULL &&
+ !sfc_flow_is_zero(last, size) &&
+ memcmp(last, spec, size) != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ranging is not supported");
+ return -rte_errno;
+ }
+
+ if (supp_mask == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Supported mask for item should be specified");
+ return -rte_errno;
+ }
+
+ /* Check that mask and spec do not ask for a wider match than supp_mask allows */
+ for (i = 0; i < size; i++) {
+ match = spec[i] | mask[i];
+ supp = ((const uint8_t *)supp_mask)[i];
+
+ if ((match | supp) != supp) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Item's field is not supported");
+ return -rte_errno;
+ }
+ }
+
+exit:
+ *spec_ptr = spec;
+ *mask_ptr = mask;
+ return 0;
+}
+
+/*
+ * Protocol parsers.
+ * Partial masking is not supported, so masks in items must be either
+ * full or empty (zeroed) and set only for the supported fields which
+ * are specified in the supp_mask.
+ */
+
+static int
+sfc_flow_parse_void(__rte_unused const struct rte_flow_item *item,
+ __rte_unused efx_filter_spec_t *efx_spec,
+ __rte_unused struct rte_flow_error *error)
+{
+ return 0;
+}
+
+/**
+ * Convert Ethernet item to EFX filter specification.
+ *
+ * @param[in] item
+ * Item specification. Only source and destination addresses and
+ * Ethernet type fields are supported. In addition to full and
+ * empty masks of the destination address, the individual/group mask is
+ * also supported. If the mask is NULL, the default mask is used.
+ * Ranging is not supported.
+ * @param[in, out] efx_spec
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_eth(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_eth *spec = NULL;
+ const struct rte_flow_item_eth *mask = NULL;
+ const struct rte_flow_item_eth supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ .type = 0xffff,
+ };
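+ /*
+ * Mask covering only the individual/group bit of the destination
+ * MAC address; it is used below to recognize requests to match all
+ * unknown unicast or all unknown multicast destinations.
+ */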
+ const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /* If "spec" is not set, could be any Ethernet */
+ if (spec == NULL)
+ return 0;
+
+ if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (memcmp(mask->dst.addr_bytes, ig_mask,
+ EFX_MAC_ADDR_LEN) == 0) {
+ if (is_unicast_ether_addr(&spec->dst))
+ efx_spec->efs_match_flags |=
+ EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
+ else
+ efx_spec->efs_match_flags |=
+ EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
+ } else if (!is_zero_ether_addr(&mask->dst)) {
+ goto fail_bad_mask;
+ }
+
+ if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
+ EFX_MAC_ADDR_LEN);
+ } else if (!is_zero_ether_addr(&mask->src)) {
+ goto fail_bad_mask;
+ }
+
+ /*
+ * Ether type is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->type == supp_mask.type) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = rte_bswap16(spec->type);
+ } else if (mask->type != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the ETH pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert VLAN item to EFX filter specification.
+ *
+ * @param[in] item
+ * Item specification. Only the VID field is supported.
+ * The mask cannot be NULL. Ranging is not supported.
+ * @param[in, out] efx_spec
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ uint16_t vid;
+ const struct rte_flow_item_vlan *spec = NULL;
+ const struct rte_flow_item_vlan *mask = NULL;
+ const struct rte_flow_item_vlan supp_mask = {
+ .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ NULL,
+ sizeof(struct rte_flow_item_vlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * VID is in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used.
+ * If two VLAN items are included, the first matches
+ * the outer tag and the next matches the inner tag.
+ */
+ if (mask->tci == supp_mask.tci) {
+ vid = rte_bswap16(spec->tci);
+
+ if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_OUTER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ efx_spec->efs_outer_vid = vid;
+ } else if (!(efx_spec->efs_match_flags &
+ EFX_FILTER_MATCH_INNER_VID)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_INNER_VID;
+ efx_spec->efs_inner_vid = vid;
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "More than two VLAN items");
+ return -rte_errno;
+ }
+ } else {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "VLAN ID in TCI match is required");
+ return -rte_errno;
+ }
+
+ return 0;
+}
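Because the supported mask covers exactly the 12 VID bits, the VLAN item mask must be 0x0fff in network byte order, and the first and second VLAN items select the outer and inner tags respectively. A hedged sketch of building a double-tagged match; the helper name and VID values are made up:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Hypothetical helper: outer VID 100, inner VID 200. */
    static void
    example_qinq_items(struct rte_flow_item items[2])
    {
        static struct rte_flow_item_vlan outer, inner, vid_mask;

        vid_mask.tci = rte_cpu_to_be_16(0x0fff);   /* VID bits only */
        outer.tci = rte_cpu_to_be_16(100);         /* first VLAN item: outer tag */
        inner.tci = rte_cpu_to_be_16(200);         /* second VLAN item: inner tag */

        items[0] = (struct rte_flow_item){
            .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &outer, .mask = &vid_mask,
        };
        items[1] = (struct rte_flow_item){
            .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &inner, .mask = &vid_mask,
        };
    }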
+
+/**
+ * Convert IPv4 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only the source and destination addresses and
+ * the protocol field are supported. If the mask is NULL, the default
+ * mask is used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv4(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv4 *spec = NULL;
+ const struct rte_flow_item_ipv4 *mask = NULL;
+ const uint16_t ether_type_ipv4 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV4);
+ const struct rte_flow_item_ipv4 supp_mask = {
+ .hdr = {
+ .src_addr = 0xffffffff,
+ .dst_addr = 0xffffffff,
+ .next_proto_id = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv4 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv4;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv4) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV4 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv4 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (mask->hdr.src_addr == supp_mask.hdr.src_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ efx_spec->efs_rem_host.eo_u32[0] = spec->hdr.src_addr;
+ } else if (mask->hdr.src_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_addr == supp_mask.hdr.dst_addr) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ efx_spec->efs_loc_host.eo_u32[0] = spec->hdr.dst_addr;
+ } else if (mask->hdr.dst_addr != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.next_proto_id == supp_mask.hdr.next_proto_id) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.next_proto_id;
+ } else if (mask->hdr.next_proto_id != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV4 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert IPv6 item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only the source and destination addresses and
+ * the next header field are supported. If the mask is NULL, the default
+ * mask is used. Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_ipv6(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_ipv6 *spec = NULL;
+ const struct rte_flow_item_ipv6 *mask = NULL;
+ const uint16_t ether_type_ipv6 = rte_cpu_to_le_16(EFX_ETHER_TYPE_IPV6);
+ const struct rte_flow_item_ipv6 supp_mask = {
+ .hdr = {
+ .src_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .dst_addr = { 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff },
+ .proto = 0xff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by IPv6 source and destination addresses requires
+ * the appropriate ETHER_TYPE in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ efx_spec->efs_ether_type = ether_type_ipv6;
+ } else if (efx_spec->efs_ether_type != ether_type_ipv6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Ethertype in pattern with IPV6 item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * IPv6 addresses are in big-endian byte order in item and in
+ * efx_spec
+ */
+ if (memcmp(mask->hdr.src_addr, supp_mask.hdr.src_addr,
+ sizeof(mask->hdr.src_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_rem_host) !=
+ sizeof(spec->hdr.src_addr));
+ rte_memcpy(&efx_spec->efs_rem_host, spec->hdr.src_addr,
+ sizeof(efx_spec->efs_rem_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.src_addr,
+ sizeof(mask->hdr.src_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (memcmp(mask->hdr.dst_addr, supp_mask.hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr)) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+
+ RTE_BUILD_BUG_ON(sizeof(efx_spec->efs_loc_host) !=
+ sizeof(spec->hdr.dst_addr));
+ rte_memcpy(&efx_spec->efs_loc_host, spec->hdr.dst_addr,
+ sizeof(efx_spec->efs_loc_host));
+ } else if (!sfc_flow_is_zero(mask->hdr.dst_addr,
+ sizeof(mask->hdr.dst_addr))) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.proto == supp_mask.hdr.proto) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = spec->hdr.proto;
+ } else if (mask->hdr.proto != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the IPV6 pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert TCP item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only the source and destination port fields
+ * are supported. If the mask is NULL, the default mask is used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_tcp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_tcp *spec = NULL;
+ const struct rte_flow_item_tcp *mask = NULL;
+ const struct rte_flow_item_tcp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by TCP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_TCP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_TCP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with TCP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the TCP pattern item");
+ return -rte_errno;
+}
+
+/**
+ * Convert UDP item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only the source and destination port fields
+ * are supported. If the mask is NULL, the default mask is used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_udp(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_udp *spec = NULL;
+ const struct rte_flow_item_udp *mask = NULL;
+ const struct rte_flow_item_udp supp_mask = {
+ .hdr = {
+ .src_port = 0xffff,
+ .dst_port = 0xffff,
+ }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp),
+ error);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * Filtering by UDP source and destination ports requires
+ * the appropriate IP_PROTO in hardware filters
+ */
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = EFX_IPPROTO_UDP;
+ } else if (efx_spec->efs_ip_proto != EFX_IPPROTO_UDP) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "IP proto in pattern with UDP item should be appropriate");
+ return -rte_errno;
+ }
+
+ if (spec == NULL)
+ return 0;
+
+ /*
+ * Source and destination ports are in big-endian byte order in item and
+ * in little-endian in efx_spec, so byte swap is used
+ */
+ if (mask->hdr.src_port == supp_mask.hdr.src_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ efx_spec->efs_rem_port = rte_bswap16(spec->hdr.src_port);
+ } else if (mask->hdr.src_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ if (mask->hdr.dst_port == supp_mask.hdr.dst_port) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ efx_spec->efs_loc_port = rte_bswap16(spec->hdr.dst_port);
+ } else if (mask->hdr.dst_port != 0) {
+ goto fail_bad_mask;
+ }
+
+ return 0;
+
+fail_bad_mask:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the UDP pattern item");
+ return -rte_errno;
+}
+
+static const struct sfc_flow_item sfc_flow_items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .parse = sfc_flow_parse_void,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .prev_layer = SFC_FLOW_ITEM_START_LAYER,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_eth,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VLAN,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L2,
+ .parse = sfc_flow_parse_vlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv4,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV6,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .parse = sfc_flow_parse_ipv6,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_TCP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_tcp,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_UDP,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_L4,
+ .parse = sfc_flow_parse_udp,
+ },
+};
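The table above defines which item sequences the PMD accepts (one or more leading layers may be omitted). As a rough application-side illustration, a rule steering UDP traffic with a given destination port to Rx queue 1 could be expressed as follows; the port id width follows the DPDK generation this patch targets, the port number and queue index are hypothetical, and error handling is omitted:

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    static struct rte_flow *
    example_udp_to_queue(uint8_t port_id, struct rte_flow_error *error)
    {
        static struct rte_flow_item_udp udp_spec, udp_mask;
        static struct rte_flow_action_queue queue = { .index = 1 };
        static const struct rte_flow_attr attr = { .ingress = 1 };

        udp_spec.hdr.dst_port = rte_cpu_to_be_16(4791);    /* made-up port */
        udp_mask.hdr.dst_port = rte_cpu_to_be_16(0xffff);

        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,
              .spec = &udp_spec, .mask = &udp_mask },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, error);
    }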
+
+/*
+ * Protocol-independent flow API support
+ */
+static int
+sfc_flow_parse_attr(const struct rte_flow_attr *attr,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ if (attr == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "NULL attribute");
+ return -rte_errno;
+ }
+ if (attr->group != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, attr,
+ "Groups are not supported");
+ return -rte_errno;
+ }
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
+ "Priorities are not supported");
+ return -rte_errno;
+ }
+ if (attr->egress != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
+ "Egress is not supported");
+ return -rte_errno;
+ }
+ if (attr->ingress == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Only ingress is supported");
+ return -rte_errno;
+ }
+
+ flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
+ flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+
+ return 0;
+}
+
+/* Get item from array sfc_flow_items */
+static const struct sfc_flow_item *
+sfc_flow_get_item(enum rte_flow_item_type type)
+{
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(sfc_flow_items); i++)
+ if (sfc_flow_items[i].type == type)
+ return &sfc_flow_items[i];
+
+ return NULL;
+}
+
+static int
+sfc_flow_parse_pattern(const struct rte_flow_item pattern[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ const struct sfc_flow_item *item;
+
+ if (pattern == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+ "NULL pattern");
+ return -rte_errno;
+ }
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ item = sfc_flow_get_item(pattern->type);
+ if (item == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unsupported pattern item");
+ return -rte_errno;
+ }
+
+ /*
+ * Omitting one or several protocol layers at the beginning
+ * of the pattern is supported
+ */
+ if (item->prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ prev_layer != SFC_FLOW_ITEM_ANY_LAYER &&
+ item->prev_layer != prev_layer) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, pattern,
+ "Unexpected sequence of pattern items");
+ return -rte_errno;
+ }
+
+ rc = item->parse(pattern, &flow->spec, error);
+ if (rc != 0)
+ return rc;
+
+ if (item->layer != SFC_FLOW_ITEM_ANY_LAYER)
+ prev_layer = item->layer;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_queue(struct sfc_adapter *sa,
+ const struct rte_flow_action_queue *queue,
+ struct rte_flow *flow)
+{
+ struct sfc_rxq *rxq;
+
+ if (queue->index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[queue->index].rxq;
+ flow->spec.efs_dmaq_id = (uint16_t)rxq->hw_index;
+
+ return 0;
+}
+
+static int
+sfc_flow_parse_actions(struct sfc_adapter *sa,
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc;
+ boolean_t is_specified = B_FALSE;
+
+ if (actions == NULL) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, NULL,
+ "NULL actions");
+ return -rte_errno;
+ }
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ rc = sfc_flow_parse_queue(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad QUEUE action");
+ return -rte_errno;
+ }
+
+ is_specified = B_TRUE;
+ break;
+
+ default:
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Action is not supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!is_specified) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_NUM, actions,
+ "Action is unspecified");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+static int
+sfc_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ int rc;
+
+ memset(&flow->spec, 0, sizeof(flow->spec));
+
+ rc = sfc_flow_parse_attr(attr, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_pattern(pattern, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ rc = sfc_flow_parse_actions(sa, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ if (!sfc_filter_is_match_supported(sa, flow->spec.efs_match_flags)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Flow rule pattern is not supported");
+ return -rte_errno;
+ }
+
+fail_bad_value:
+ return rc;
+}
+
+static int
+sfc_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct rte_flow flow;
+
+ return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+}
+
+static struct rte_flow *
+sfc_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow = NULL;
+ int rc;
+
+ flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ goto fail_no_mem;
+ }
+
+ rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+ if (rc != 0)
+ goto fail_bad_value;
+
+ sfc_adapter_lock(sa);
+
+ TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_filter_insert(sa->nic, &flow->spec);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to insert filter");
+ goto fail_filter_insert;
+ }
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return flow;
+
+fail_filter_insert:
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ sfc_adapter_unlock(sa);
+
+fail_bad_value:
+ rte_free(flow);
+
+fail_no_mem:
+ return NULL;
+}
+
+static int
+sfc_flow_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ int rc = 0;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ rc = efx_filter_remove(sa->nic, &flow->spec);
+ if (rc != 0)
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to destroy flow rule");
+ }
+
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+
+ return rc;
+}
+
+static int
+sfc_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow_ptr;
+ int rc = EINVAL;
+
+ sfc_adapter_lock(sa);
+
+ TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
+ if (flow_ptr == flow)
+ rc = 0;
+ }
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Failed to find flow rule to destroy");
+ goto fail_bad_value;
+ }
+
+ rc = sfc_flow_remove(sa, flow, error);
+
+fail_bad_value:
+ sfc_adapter_unlock(sa);
+
+ return -rc;
+}
+
+static int
+sfc_flow_flush(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = dev->data->dev_private;
+ struct rte_flow *flow;
+ int rc = 0;
+ int ret = 0;
+
+ sfc_adapter_lock(sa);
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ rc = sfc_flow_remove(sa, flow, error);
+ if (rc != 0)
+ ret = rc;
+ }
+
+ sfc_adapter_unlock(sa);
+
+ return -ret;
+}
+
+const struct rte_flow_ops sfc_flow_ops = {
+ .validate = sfc_flow_validate,
+ .create = sfc_flow_create,
+ .destroy = sfc_flow_destroy,
+ .flush = sfc_flow_flush,
+ .query = NULL,
+};
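sfc_flow_ops is the table that the generic rte_flow layer looks up through the ethdev filter control callback. A hedged sketch of how such a callback typically hands the table out in this DPDK generation; this is not the driver's actual implementation, which is wired up from the ethdev ops elsewhere in this patch:

    #include <errno.h>

    #include <rte_common.h>
    #include <rte_ethdev.h>
    #include <rte_flow_driver.h>

    /* Hypothetical filter_ctrl callback exposing the rte_flow ops table. */
    static int
    example_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
                        enum rte_filter_type filter_type,
                        enum rte_filter_op filter_op,
                        void *arg)
    {
        if (filter_type != RTE_ETH_FILTER_GENERIC)
            return -ENOTSUP;
        if (filter_op != RTE_ETH_FILTER_GET)
            return -EINVAL;

        *(const struct rte_flow_ops **)arg = &sfc_flow_ops;
        return 0;
    }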
+
+void
+sfc_flow_init(struct sfc_adapter *sa)
+{
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_INIT(&sa->filter.flow_list);
+}
+
+void
+sfc_flow_fini(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
+ TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+ rte_free(flow);
+ }
+}
+
+void
+sfc_flow_stop(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
+ efx_filter_remove(sa->nic, &flow->spec);
+}
+
+int
+sfc_flow_start(struct sfc_adapter *sa)
+{
+ struct rte_flow *flow;
+ int rc = 0;
+
+ sfc_log_init(sa, "entry");
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
+ rc = efx_filter_insert(sa->nic, &flow->spec);
+ if (rc != 0)
+ goto fail_bad_flow;
+ }
+
+ sfc_log_init(sa, "done");
+
+fail_bad_flow:
+ return rc;
+}
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
new file mode 100644
index 00000000..bfc34364
--- /dev/null
+++ b/drivers/net/sfc/sfc_flow.h
@@ -0,0 +1,64 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_FLOW_H
+#define _SFC_FLOW_H
+
+#include <rte_tailq.h>
+#include <rte_flow_driver.h>
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* PMD-specific definition of the opaque type from rte_flow.h */
+struct rte_flow {
+ efx_filter_spec_t spec; /* filter specification */
+ TAILQ_ENTRY(rte_flow) entries; /* flow list entries */
+};
+
+TAILQ_HEAD(sfc_flow_list, rte_flow);
+
+extern const struct rte_flow_ops sfc_flow_ops;
+
+struct sfc_adapter;
+
+void sfc_flow_init(struct sfc_adapter *sa);
+void sfc_flow_fini(struct sfc_adapter *sa);
+int sfc_flow_start(struct sfc_adapter *sa);
+void sfc_flow_stop(struct sfc_adapter *sa);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_FLOW_H */
diff --git a/drivers/net/sfc/sfc_intr.c b/drivers/net/sfc/sfc_intr.c
new file mode 100644
index 00000000..7eb4b86c
--- /dev/null
+++ b/drivers/net/sfc/sfc_intr.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * At the moment of writing, DPDK v16.07 has a notion of two types of
+ * interrupts: LSC (link status change) and RXQ (receive indication).
+ * It allows registering an interrupt callback for the entire device, which
+ * is not intended to be used for receive indication (i.e. link status
+ * change indication only). The handler has no information about which HW
+ * interrupt triggered it, so we don't know which event queue should be
+ * polled/reprimed (except for qmask in the case of a legacy line interrupt).
+ */
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+
+static void
+sfc_intr_handle_mgmt_evq(struct sfc_adapter *sa)
+{
+ struct sfc_evq *evq;
+
+ rte_spinlock_lock(&sa->mgmt_evq_lock);
+
+ evq = sa->mgmt_evq;
+
+ if (evq->init_state != SFC_EVQ_STARTED) {
+ sfc_log_init(sa, "interrupt on stopped EVQ %u", evq->evq_index);
+ } else {
+ sfc_ev_qpoll(evq);
+
+ if (sfc_ev_qprime(evq) != 0)
+ sfc_err(sa, "cannot prime EVQ %u", evq->evq_index);
+ }
+
+ rte_spinlock_unlock(&sa->mgmt_evq_lock);
+}
+
+static void
+sfc_intr_line_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ uint32_t qmask;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa,
+ "interrupt on stopped adapter, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_line(enp, &fatal, &qmask);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ if (qmask & (1 << sa->mgmt_evq_index))
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_info(sa, "link status change event: link %s",
+ sa->eth_dev->data->dev_link.link_status ?
+ "UP" : "DOWN");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+}
+
+static void
+sfc_intr_message_handler(void *cb_arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)cb_arg;
+ efx_nic_t *enp = sa->nic;
+ boolean_t fatal;
+ unsigned int lsc_seq = sa->port.lsc_seq;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (sa->state != SFC_ADAPTER_STARTED &&
+ sa->state != SFC_ADAPTER_STARTING &&
+ sa->state != SFC_ADAPTER_STOPPING) {
+ sfc_log_init(sa, "adapter not-started, don't reenable");
+ goto exit;
+ }
+
+ efx_intr_status_message(enp, sa->mgmt_evq_index, &fatal);
+ if (fatal) {
+ (void)efx_intr_disable(enp);
+ (void)efx_intr_fatal(enp);
+ sfc_err(sa, "fatal, interrupts disabled");
+ goto exit;
+ }
+
+ sfc_intr_handle_mgmt_evq(sa);
+
+ if (rte_intr_enable(&pci_dev->intr_handle) != 0)
+ sfc_err(sa, "cannot reenable interrupts");
+
+ sfc_log_init(sa, "done");
+
+exit:
+ if (lsc_seq != sa->port.lsc_seq) {
+ sfc_info(sa, "link status change event");
+ _rte_eth_dev_callback_process(sa->eth_dev,
+ RTE_ETH_EVENT_INTR_LSC, NULL);
+ }
+}
+
+int
+sfc_intr_start(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_intr_handle *intr_handle;
+ struct rte_pci_device *pci_dev;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ /*
+ * The EFX common code event queue module depends on the interrupt
+ * module. Ensure that the interrupt module is always initialized
+ * (even if interrupts are not used). Status memory is required
+ * for Siena only and may be NULL for EF10.
+ */
+ sfc_log_init(sa, "efx_intr_init");
+ rc = efx_intr_init(sa->nic, intr->type, NULL);
+ if (rc != 0)
+ goto fail_intr_init;
+
+ pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+ intr_handle = &pci_dev->intr_handle;
+
+ if (intr->handler != NULL) {
+ sfc_log_init(sa, "rte_intr_callback_register");
+ rc = rte_intr_callback_register(intr_handle, intr->handler,
+ (void *)sa);
+ if (rc != 0) {
+ sfc_err(sa,
+ "cannot register interrupt handler (rc=%d)",
+ rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_cb_reg;
+ }
+
+ sfc_log_init(sa, "rte_intr_enable");
+ rc = rte_intr_enable(intr_handle);
+ if (rc != 0) {
+ sfc_err(sa, "cannot enable interrupts (rc=%d)", rc);
+ /*
+ * Convert error code from negative returned by RTE API
+ * to positive used in the driver.
+ */
+ rc = -rc;
+ goto fail_rte_intr_enable;
+ }
+
+ sfc_log_init(sa, "efx_intr_enable");
+ efx_intr_enable(sa->nic);
+ }
+
+ sfc_log_init(sa, "done type=%u max_intr=%d nb_efd=%u vec=%p",
+ intr_handle->type, intr_handle->max_intr,
+ intr_handle->nb_efd, intr_handle->intr_vec);
+ return 0;
+
+fail_rte_intr_enable:
+ rte_intr_callback_unregister(intr_handle, intr->handler, (void *)sa);
+
+fail_rte_intr_cb_reg:
+ efx_intr_fini(sa->nic);
+
+fail_intr_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_intr_stop(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ if (intr->handler != NULL) {
+ struct rte_intr_handle *intr_handle;
+ int rc;
+
+ efx_intr_disable(sa->nic);
+
+ intr_handle = &pci_dev->intr_handle;
+ if (rte_intr_disable(intr_handle) != 0)
+ sfc_err(sa, "cannot disable interrupts");
+
+ while ((rc = rte_intr_callback_unregister(intr_handle,
+ intr->handler, (void *)sa)) == -EAGAIN)
+ ;
+ if (rc != 1)
+ sfc_err(sa,
+ "cannot unregister interrupt handler %d",
+ rc);
+ }
+
+ efx_intr_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_configure(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+
+ sfc_log_init(sa, "entry");
+
+ intr->handler = NULL;
+ intr->lsc_intr = (sa->eth_dev->data->dev_conf.intr_conf.lsc != 0);
+ if (!intr->lsc_intr) {
+ sfc_info(sa, "LSC tracking using interrupts is disabled");
+ goto done;
+ }
+
+ switch (intr->type) {
+ case EFX_INTR_MESSAGE:
+ intr->handler = sfc_intr_message_handler;
+ break;
+ case EFX_INTR_LINE:
+ intr->handler = sfc_intr_line_handler;
+ break;
+ case EFX_INTR_INVALID:
+ sfc_warn(sa, "interrupts are not supported");
+ break;
+ default:
+ sfc_panic(sa, "unexpected EFX interrupt type %u\n", intr->type);
+ break;
+ }
+
+done:
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_intr_attach(struct sfc_adapter *sa)
+{
+ struct sfc_intr *intr = &sa->intr;
+ struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(sa->eth_dev);
+
+ sfc_log_init(sa, "entry");
+
+ switch (pci_dev->intr_handle.type) {
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ case RTE_INTR_HANDLE_UIO_INTX:
+ case RTE_INTR_HANDLE_VFIO_LEGACY:
+ intr->type = EFX_INTR_LINE;
+ break;
+ case RTE_INTR_HANDLE_UIO:
+ case RTE_INTR_HANDLE_VFIO_MSI:
+ case RTE_INTR_HANDLE_VFIO_MSIX:
+ intr->type = EFX_INTR_MESSAGE;
+ break;
+#endif
+ default:
+ intr->type = EFX_INTR_INVALID;
+ break;
+ }
+
+ sfc_log_init(sa, "done");
+ return 0;
+}
+
+void
+sfc_intr_detach(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ sa->intr.type = EFX_INTR_INVALID;
+
+ sfc_log_init(sa, "done");
+}
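These handlers only serve link status change notification, and they are installed only when the application asks for LSC interrupts at configure time. A hedged application-side sketch; the queue counts are made up, and the event callback itself is registered separately with rte_eth_dev_callback_register() for RTE_ETH_EVENT_INTR_LSC and is invoked from the handlers above via _rte_eth_dev_callback_process():

    #include <string.h>

    #include <rte_ethdev.h>

    /* Hypothetical: request LSC interrupts when configuring a port. */
    static int
    example_enable_lsc(uint8_t port_id)
    {
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.intr_conf.lsc = 1;    /* ask the PMD for RTE_ETH_EVENT_INTR_LSC */

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }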
diff --git a/drivers/net/sfc/sfc_kvargs.c b/drivers/net/sfc/sfc_kvargs.c
new file mode 100644
index 00000000..7bcd5951
--- /dev/null
+++ b/drivers/net/sfc/sfc_kvargs.c
@@ -0,0 +1,145 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+#include <strings.h>
+
+#include <rte_devargs.h>
+#include <rte_kvargs.h>
+
+#include "sfc.h"
+#include "sfc_kvargs.h"
+
+int
+sfc_kvargs_parse(struct sfc_adapter *sa)
+{
+ struct rte_eth_dev *eth_dev = (sa)->eth_dev;
+ struct rte_devargs *devargs = eth_dev->device->devargs;
+ const char **params = (const char *[]){
+ SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ SFC_KVARG_DEBUG_INIT,
+ SFC_KVARG_MCDI_LOGGING,
+ SFC_KVARG_PERF_PROFILE,
+ SFC_KVARG_RX_DATAPATH,
+ SFC_KVARG_TX_DATAPATH,
+ NULL,
+ };
+
+ if (devargs == NULL)
+ return 0;
+
+ sa->kvargs = rte_kvargs_parse(devargs->args, params);
+ if (sa->kvargs == NULL)
+ return EINVAL;
+
+ return 0;
+}
+
+void
+sfc_kvargs_cleanup(struct sfc_adapter *sa)
+{
+ rte_kvargs_free(sa->kvargs);
+}
+
+static int
+sfc_kvarg_match_value(const char *value, const char * const *values,
+ unsigned int n_values)
+{
+ unsigned int i;
+
+ for (i = 0; i < n_values; ++i)
+ if (strcasecmp(value, values[i]) == 0)
+ return 1;
+
+ return 0;
+}
+
+int
+sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg)
+{
+ if (sa->kvargs == NULL)
+ return 0;
+
+ return -rte_kvargs_process(sa->kvargs, key_match, handler, opaque_arg);
+}
+
+int
+sfc_kvarg_bool_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ const char * const true_strs[] = {
+ "1", "y", "yes", "on", "true"
+ };
+ const char * const false_strs[] = {
+ "0", "n", "no", "off", "false"
+ };
+ bool *value = opaque;
+
+ if (sfc_kvarg_match_value(value_str, true_strs,
+ RTE_DIM(true_strs)))
+ *value = true;
+ else if (sfc_kvarg_match_value(value_str, false_strs,
+ RTE_DIM(false_strs)))
+ *value = false;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+int
+sfc_kvarg_long_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ long value;
+ char *endptr;
+
+ if (!value_str || !opaque)
+ return -EINVAL;
+
+ value = strtol(value_str, &endptr, 0);
+ if (endptr == value_str)
+ return -EINVAL;
+
+ *(long *)opaque = value;
+
+ return 0;
+}
+
+int
+sfc_kvarg_string_handler(__rte_unused const char *key,
+ const char *value_str, void *opaque)
+{
+ *(const char **)opaque = value_str;
+
+ return 0;
+}
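A hedged sketch of how a driver component consumes one of these keys; the same pattern appears in sfc_mcdi_init() later in this patch, and the key choice, helper name and variable are illustrative only:

    #include <stdbool.h>

    #include "sfc.h"
    #include "sfc_kvargs.h"

    /* Hypothetical consumer of the debug_init boolean device argument. */
    static int
    example_get_debug_init(struct sfc_adapter *sa, bool *enabledp)
    {
        *enabledp = false;    /* default when the key is absent */

        /* Returns 0 or a positive errno, per the driver convention */
        return sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
                                  sfc_kvarg_bool_handler, enabledp);
    }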
diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h
new file mode 100644
index 00000000..d9c3b1da
--- /dev/null
+++ b/drivers/net/sfc/sfc_kvargs.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_KVARGS_H
+#define _SFC_KVARGS_H
+
+#include <rte_kvargs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define SFC_KVARG_VALUES_BOOL "[1|y|yes|on|0|n|no|off]"
+
+#define SFC_KVARG_DEBUG_INIT "debug_init"
+
+#define SFC_KVARG_MCDI_LOGGING "mcdi_logging"
+
+#define SFC_KVARG_PERF_PROFILE "perf_profile"
+
+#define SFC_KVARG_PERF_PROFILE_AUTO "auto"
+#define SFC_KVARG_PERF_PROFILE_THROUGHPUT "throughput"
+#define SFC_KVARG_PERF_PROFILE_LOW_LATENCY "low-latency"
+#define SFC_KVARG_VALUES_PERF_PROFILE \
+ "[" SFC_KVARG_PERF_PROFILE_AUTO "|" \
+ SFC_KVARG_PERF_PROFILE_THROUGHPUT "|" \
+ SFC_KVARG_PERF_PROFILE_LOW_LATENCY "]"
+
+#define SFC_KVARG_STATS_UPDATE_PERIOD_MS "stats_update_period_ms"
+
+#define SFC_KVARG_DATAPATH_EFX "efx"
+#define SFC_KVARG_DATAPATH_EF10 "ef10"
+#define SFC_KVARG_DATAPATH_EF10_SIMPLE "ef10_simple"
+
+#define SFC_KVARG_RX_DATAPATH "rx_datapath"
+#define SFC_KVARG_VALUES_RX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "]"
+
+#define SFC_KVARG_TX_DATAPATH "tx_datapath"
+#define SFC_KVARG_VALUES_TX_DATAPATH \
+ "[" SFC_KVARG_DATAPATH_EFX "|" \
+ SFC_KVARG_DATAPATH_EF10 "|" \
+ SFC_KVARG_DATAPATH_EF10_SIMPLE "]"
+
+struct sfc_adapter;
+
+int sfc_kvargs_parse(struct sfc_adapter *sa);
+void sfc_kvargs_cleanup(struct sfc_adapter *sa);
+
+int sfc_kvargs_process(struct sfc_adapter *sa, const char *key_match,
+ arg_handler_t handler, void *opaque_arg);
+
+int sfc_kvarg_bool_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_long_handler(const char *key, const char *value_str,
+ void *opaque);
+int sfc_kvarg_string_handler(const char *key, const char *value_str,
+ void *opaque);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SFC_KVARGS_H */
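These keys are passed as device arguments on the EAL PCI whitelist option; an illustrative (made-up) invocation:

    -w 0000:02:00.0,perf_profile=low-latency,stats_update_period_ms=2000,mcdi_logging=1

Unknown keys make rte_kvargs_parse() fail, which sfc_kvargs_parse() above reports as EINVAL.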
diff --git a/drivers/net/sfc/sfc_log.h b/drivers/net/sfc/sfc_log.h
new file mode 100644
index 00000000..8a5e2302
--- /dev/null
+++ b/drivers/net/sfc/sfc_log.h
@@ -0,0 +1,76 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_LOG_H_
+#define _SFC_LOG_H_
+
+/* Log PMD message, automatically add prefix and \n */
+#define SFC_LOG(sa, level, ...) \
+ do { \
+ const struct rte_eth_dev *_dev = (sa)->eth_dev; \
+ const struct rte_pci_device *_pci_dev = SFC_DEV_TO_PCI(_dev); \
+ \
+ RTE_LOG(level, PMD, \
+ RTE_FMT("sfc_efx " PCI_PRI_FMT " #%" PRIu8 ": " \
+ RTE_FMT_HEAD(__VA_ARGS__,) "\n", \
+ _pci_dev->addr.domain, \
+ _pci_dev->addr.bus, \
+ _pci_dev->addr.devid, \
+ _pci_dev->addr.function, \
+ _dev->data->port_id, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#define sfc_err(sa, ...) \
+ SFC_LOG(sa, ERR, __VA_ARGS__)
+
+#define sfc_warn(sa, ...) \
+ SFC_LOG(sa, WARNING, __VA_ARGS__)
+
+#define sfc_notice(sa, ...) \
+ SFC_LOG(sa, NOTICE, __VA_ARGS__)
+
+#define sfc_info(sa, ...) \
+ SFC_LOG(sa, INFO, __VA_ARGS__)
+
+#define sfc_log_init(sa, ...) \
+ do { \
+ const struct sfc_adapter *_sa = (sa); \
+ \
+ if (_sa->debug_init) \
+ SFC_LOG(_sa, INFO, \
+ RTE_FMT("%s(): " \
+ RTE_FMT_HEAD(__VA_ARGS__,), \
+ __func__, \
+ RTE_FMT_TAIL(__VA_ARGS__,))); \
+ } while (0)
+
+#endif /* _SFC_LOG_H_ */
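A hedged usage sketch: the macros take a struct sfc_adapter pointer and prepend the PCI address and port id automatically; the identifiers and values below are made up:

    /* Hypothetical call sites. */
    sfc_info(sa, "configured %u RxQ(s) and %u TxQ(s)", nb_rxq, nb_txq);
    sfc_err(sa, "EVQ %u prime failed (rc=%d)", evq_index, rc);

    /* Emitted only when the debug_init device argument is enabled. */
    sfc_log_init(sa, "entry");

With the prefix expanded, the first call would print something like: sfc_efx 0000:02:00.0 #0: configured 4 RxQ(s) and 4 TxQ(s)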
diff --git a/drivers/net/sfc/sfc_mcdi.c b/drivers/net/sfc/sfc_mcdi.c
new file mode 100644
index 00000000..0faad3ed
--- /dev/null
+++ b/drivers/net/sfc/sfc_mcdi.c
@@ -0,0 +1,331 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cycles.h>
+
+#include "efx.h"
+#include "efx_mcdi.h"
+#include "efx_regs_mcdi.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+#include "sfc_ev.h"
+
+#define SFC_MCDI_POLL_INTERVAL_MIN_US 10 /* 10us in 1us units */
+#define SFC_MCDI_POLL_INTERVAL_MAX_US (US_PER_S / 10) /* 100ms in 1us units */
+#define SFC_MCDI_WATCHDOG_INTERVAL_US (10 * US_PER_S) /* 10s in 1us units */
+
+static void
+sfc_mcdi_timeout(struct sfc_adapter *sa)
+{
+ sfc_warn(sa, "MC TIMEOUT");
+
+ sfc_panic(sa, "MCDI timeout handling is not implemented\n");
+}
+
+static inline boolean_t
+sfc_mcdi_proxy_event_available(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = 0;
+ mcdi->proxy_result = ETIMEDOUT;
+ sfc_ev_mgmt_qpoll(sa);
+ if (mcdi->proxy_result != ETIMEDOUT)
+ return B_TRUE;
+
+ return B_FALSE;
+}
+
+static void
+sfc_mcdi_poll(struct sfc_adapter *sa, boolean_t proxy)
+{
+ efx_nic_t *enp;
+ unsigned int delay_total;
+ unsigned int delay_us;
+ boolean_t aborted __rte_unused;
+
+ delay_total = 0;
+ delay_us = SFC_MCDI_POLL_INTERVAL_MIN_US;
+ enp = sa->nic;
+
+ do {
+ boolean_t poll_completed;
+
+ poll_completed = (proxy) ? sfc_mcdi_proxy_event_available(sa) :
+ efx_mcdi_request_poll(enp);
+ if (poll_completed)
+ return;
+
+ if (delay_total > SFC_MCDI_WATCHDOG_INTERVAL_US) {
+ if (!proxy) {
+ aborted = efx_mcdi_request_abort(enp);
+ SFC_ASSERT(aborted);
+ sfc_mcdi_timeout(sa);
+ }
+
+ return;
+ }
+
+ rte_delay_us(delay_us);
+
+ delay_total += delay_us;
+
+ /* Exponentially back off the poll frequency */
+ RTE_BUILD_BUG_ON(SFC_MCDI_POLL_INTERVAL_MAX_US > UINT_MAX / 2);
+ delay_us *= 2;
+ if (delay_us > SFC_MCDI_POLL_INTERVAL_MAX_US)
+ delay_us = SFC_MCDI_POLL_INTERVAL_MAX_US;
+
+ } while (1);
+}
+
+static void
+sfc_mcdi_execute(void *arg, efx_mcdi_req_t *emrp)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+ uint32_t proxy_handle;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+
+ if (efx_mcdi_get_proxy_handle(sa->nic, emrp, &proxy_handle) == 0) {
+ /*
+ * Authorization is required for the MCDI request;
+ * wait for an MCDI proxy response event to bring
+ * a non-zero proxy handle (should be the same as
+ * the value obtained above) and operation status
+ */
+ sfc_mcdi_poll(sa, B_TRUE);
+
+ if ((mcdi->proxy_handle != 0) &&
+ (mcdi->proxy_handle != proxy_handle)) {
+ sfc_err(sa, "Unexpected MCDI proxy event");
+ emrp->emr_rc = EFAULT;
+ } else if (mcdi->proxy_result == 0) {
+ /*
+ * Authorization succeeded; re-issue the original
+ * request and poll for an ordinary MCDI response
+ */
+ efx_mcdi_request_start(sa->nic, emrp, B_FALSE);
+ sfc_mcdi_poll(sa, B_FALSE);
+ } else {
+ emrp->emr_rc = mcdi->proxy_result;
+ sfc_err(sa, "MCDI proxy authorization failed "
+ "(handle=%08x, result=%d)",
+ proxy_handle, mcdi->proxy_result);
+ }
+ }
+
+ rte_spinlock_unlock(&mcdi->lock);
+}
+
+static void
+sfc_mcdi_ev_cpl(void *arg)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi __rte_unused;
+
+ mcdi = &sa->mcdi;
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+
+ /* MCDI is polled, completions are not expected */
+ SFC_ASSERT(0);
+}
+
+static void
+sfc_mcdi_exception(void *arg, efx_mcdi_exception_t eme)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+
+ sfc_warn(sa, "MC %s",
+ (eme == EFX_MCDI_EXCEPTION_MC_REBOOT) ? "REBOOT" :
+ (eme == EFX_MCDI_EXCEPTION_MC_BADASSERT) ? "BADASSERT" : "UNKNOWN");
+
+ sfc_panic(sa, "MCDI exceptions handling is not implemented\n");
+}
+
+#define SFC_MCDI_LOG_BUF_SIZE 128
+
+static size_t
+sfc_mcdi_do_log(const struct sfc_adapter *sa,
+ char *buffer, void *data, size_t data_size,
+ size_t pfxsize, size_t position)
+{
+ uint32_t *words = data;
+ /* Space separator plus 2 characters per byte */
+ const size_t word_str_space = 1 + 2 * sizeof(*words);
+ size_t i;
+
+ for (i = 0; i < data_size; i += sizeof(*words)) {
+ if (position + word_str_space >=
+ SFC_MCDI_LOG_BUF_SIZE) {
+ /* Flush at SFC_MCDI_LOG_BUF_SIZE with backslash
+ * at the end which is required by netlogdecode.
+ */
+ buffer[position] = '\0';
+ sfc_info(sa, "%s \\", buffer);
+ /* Preserve prefix for the next log message */
+ position = pfxsize;
+ }
+ position += snprintf(buffer + position,
+ SFC_MCDI_LOG_BUF_SIZE - position,
+ " %08x", *words);
+ words++;
+ }
+ return position;
+}
+
+static void
+sfc_mcdi_logger(void *arg, efx_log_msg_t type,
+ void *header, size_t header_size,
+ void *data, size_t data_size)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ char buffer[SFC_MCDI_LOG_BUF_SIZE];
+ size_t pfxsize;
+ size_t start;
+
+ if (!sa->mcdi.logging)
+ return;
+
+ /* The format, including the prefix added by sfc_info(), is the one
+ * consumed by the Solarflare netlogdecode tool.
+ */
+ pfxsize = snprintf(buffer, sizeof(buffer), "MCDI RPC %s:",
+ type == EFX_LOG_MCDI_REQUEST ? "REQ" :
+ type == EFX_LOG_MCDI_RESPONSE ? "RESP" : "???");
+ start = sfc_mcdi_do_log(sa, buffer, header, header_size,
+ pfxsize, pfxsize);
+ start = sfc_mcdi_do_log(sa, buffer, data, data_size, pfxsize, start);
+ if (start != pfxsize) {
+ buffer[start] = '\0';
+ sfc_info(sa, "%s", buffer);
+ }
+}
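With mcdi_logging enabled, the code above emits each request and response as space-separated 32-bit hex words behind the usual adapter prefix, splitting long messages with a trailing backslash so netlogdecode can reassemble them. An illustrative (made-up) line:

    sfc_efx 0000:02:00.0 #0: MCDI RPC REQ: 0000013c 00000001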
+
+static void
+sfc_mcdi_ev_proxy_response(void *arg, uint32_t handle, efx_rc_t result)
+{
+ struct sfc_adapter *sa = (struct sfc_adapter *)arg;
+ struct sfc_mcdi *mcdi = &sa->mcdi;
+
+ mcdi->proxy_handle = handle;
+ mcdi->proxy_result = result;
+}
+
+int
+sfc_mcdi_init(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ size_t max_msg_size;
+ efx_mcdi_transport_t *emtp;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_UNINITIALIZED);
+
+ rte_spinlock_init(&mcdi->lock);
+
+ mcdi->state = SFC_MCDI_INITIALIZED;
+
+ max_msg_size = sizeof(uint32_t) + MCDI_CTL_SDU_LEN_MAX_V2;
+ rc = sfc_dma_alloc(sa, "mcdi", 0, max_msg_size, sa->socket_id,
+ &mcdi->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ /* Convert negative error to positive used in the driver */
+ rc = sfc_kvargs_process(sa, SFC_KVARG_MCDI_LOGGING,
+ sfc_kvarg_bool_handler, &mcdi->logging);
+ if (rc != 0)
+ goto fail_kvargs_process;
+
+ emtp = &mcdi->transport;
+ emtp->emt_context = sa;
+ emtp->emt_dma_mem = &mcdi->mem;
+ emtp->emt_execute = sfc_mcdi_execute;
+ emtp->emt_ev_cpl = sfc_mcdi_ev_cpl;
+ emtp->emt_exception = sfc_mcdi_exception;
+ emtp->emt_logger = sfc_mcdi_logger;
+ emtp->emt_ev_proxy_response = sfc_mcdi_ev_proxy_response;
+
+ sfc_log_init(sa, "init MCDI");
+ rc = efx_mcdi_init(sa->nic, emtp);
+ if (rc != 0)
+ goto fail_mcdi_init;
+
+ return 0;
+
+fail_mcdi_init:
+ memset(emtp, 0, sizeof(*emtp));
+
+fail_kvargs_process:
+ sfc_dma_free(sa, &mcdi->mem);
+
+fail_dma_alloc:
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+ return rc;
+}
+
+void
+sfc_mcdi_fini(struct sfc_adapter *sa)
+{
+ struct sfc_mcdi *mcdi;
+ efx_mcdi_transport_t *emtp;
+
+ sfc_log_init(sa, "entry");
+
+ mcdi = &sa->mcdi;
+ emtp = &mcdi->transport;
+
+ rte_spinlock_lock(&mcdi->lock);
+
+ SFC_ASSERT(mcdi->state == SFC_MCDI_INITIALIZED);
+ mcdi->state = SFC_MCDI_UNINITIALIZED;
+
+ sfc_log_init(sa, "fini MCDI");
+ efx_mcdi_fini(sa->nic);
+ memset(emtp, 0, sizeof(*emtp));
+
+ rte_spinlock_unlock(&mcdi->lock);
+
+ sfc_dma_free(sa, &mcdi->mem);
+}
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
new file mode 100644
index 00000000..ee96bcde
--- /dev/null
+++ b/drivers/net/sfc/sfc_port.c
@@ -0,0 +1,475 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_log.h"
+#include "sfc_kvargs.h"
+
+/** Default MAC statistics update period is 1 second */
+#define SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF MS_PER_S
+
+/** The number of microseconds to sleep between statistics update attempts */
+#define SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US 10
+
+/** The number of attempts to await arrival of freshly generated statistics */
+#define SFC_MAC_STATS_UPDATE_NB_ATTEMPTS 50
+
+/**
+ * Update MAC statistics in the buffer.
+ *
+ * @param sa Adapter
+ *
+ * @return Status code
+ * @retval 0 Success
+ * @retval EAGAIN Try again
+ * @retval ENOMEM Memory allocation failure
+ */
+int
+sfc_port_update_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ efsys_mem_t *esmp = &port->mac_stats_dma_mem;
+ uint32_t *genp = NULL;
+ uint32_t gen_old;
+ unsigned int nb_attempts = 0;
+ int rc;
+
+ SFC_ASSERT(rte_spinlock_is_locked(&port->mac_stats_lock));
+
+ if (sa->state != SFC_ADAPTER_STARTED)
+ return EINVAL;
+
+ /*
+ * If periodic statistics DMA'ing is off or not supported, make a
+ * manual request and, if an update period is set, rate-limit the
+ * requests using the last request timestamp
+ */
+ if (!port->mac_stats_periodic_dma_supported ||
+ (port->mac_stats_update_period_ms == 0)) {
+ if (port->mac_stats_update_period_ms != 0) {
+ uint64_t timestamp = sfc_get_system_msecs();
+
+ if ((timestamp -
+ port->mac_stats_last_request_timestamp) <
+ port->mac_stats_update_period_ms)
+ return 0;
+
+ port->mac_stats_last_request_timestamp = timestamp;
+ }
+
+ rc = efx_mac_stats_upload(sa->nic, esmp);
+ if (rc != 0)
+ return rc;
+
+ genp = &port->mac_stats_update_generation;
+ gen_old = *genp;
+ }
+
+ do {
+ if (nb_attempts > 0)
+ rte_delay_us(SFC_MAC_STATS_UPDATE_RETRY_INTERVAL_US);
+
+ rc = efx_mac_stats_update(sa->nic, esmp,
+ port->mac_stats_buf, genp);
+ if (rc != 0)
+ return rc;
+
+ } while ((genp != NULL) && (*genp == gen_old) &&
+ (++nb_attempts < SFC_MAC_STATS_UPDATE_NB_ATTEMPTS));
+
+ return 0;
+}
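The helper expects mac_stats_lock to be held by the caller (see the assertion above). A hedged sketch of a read path; the helper name is hypothetical and the EFX statistic id is an assumption for illustration:

    #include <rte_spinlock.h>

    #include "efx.h"
    #include "sfc.h"

    /* Hypothetical reader of a single MAC statistic. */
    static uint64_t
    example_read_rx_pkts(struct sfc_adapter *sa)
    {
        struct sfc_port *port = &sa->port;
        uint64_t value = 0;

        rte_spinlock_lock(&port->mac_stats_lock);
        if (sfc_port_update_mac_stats(sa) == 0)
            value = port->mac_stats_buf[EFX_MAC_RX_PKTS];    /* assumed stat id */
        rte_spinlock_unlock(&port->mac_stats_lock);

        return value;
    }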
+
+int
+sfc_port_reset_mac_stats(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rte_spinlock_lock(&port->mac_stats_lock);
+ rc = efx_mac_stats_clear(sa->nic);
+ rte_spinlock_unlock(&port->mac_stats_lock);
+
+ return rc;
+}
+
+static int
+sfc_port_init_dev_link(struct sfc_adapter *sa)
+{
+ struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
+ int rc;
+ efx_link_mode_t link_mode;
+ struct rte_eth_link current_link;
+
+ rc = efx_port_poll(sa->nic, &link_mode);
+ if (rc != 0)
+ return rc;
+
+ sfc_port_link_mode_to_info(link_mode, &current_link);
+
+ EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
+ rte_atomic64_set((rte_atomic64_t *)dev_link,
+ *(uint64_t *)&current_link);
+
+ return 0;
+}
+
+int
+sfc_port_start(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+ uint32_t phy_adv_cap;
+ const uint32_t phy_pause_caps =
+ ((1u << EFX_PHY_CAP_PAUSE) | (1u << EFX_PHY_CAP_ASYM));
+
+ sfc_log_init(sa, "entry");
+
+ sfc_log_init(sa, "init filters");
+ rc = efx_filter_init(sa->nic);
+ if (rc != 0)
+ goto fail_filter_init;
+
+ sfc_log_init(sa, "init port");
+ rc = efx_port_init(sa->nic);
+ if (rc != 0)
+ goto fail_port_init;
+
+ sfc_log_init(sa, "set flow control to %#x autoneg=%u",
+ port->flow_ctrl, port->flow_ctrl_autoneg);
+ rc = efx_mac_fcntl_set(sa->nic, port->flow_ctrl,
+ port->flow_ctrl_autoneg);
+ if (rc != 0)
+ goto fail_mac_fcntl_set;
+
+ /* Preserve pause capabilities set by above efx_mac_fcntl_set() */
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_CURRENT, &phy_adv_cap);
+ SFC_ASSERT((port->phy_adv_cap & phy_pause_caps) == 0);
+ phy_adv_cap = port->phy_adv_cap | (phy_adv_cap & phy_pause_caps);
+
+ sfc_log_init(sa, "set phy adv caps to %#x", phy_adv_cap);
+ rc = efx_phy_adv_cap_set(sa->nic, phy_adv_cap);
+ if (rc != 0)
+ goto fail_phy_adv_cap_set;
+
+ sfc_log_init(sa, "set MAC PDU %u", (unsigned int)port->pdu);
+ rc = efx_mac_pdu_set(sa->nic, port->pdu);
+ if (rc != 0)
+ goto fail_mac_pdu_set;
+
+ sfc_log_init(sa, "set MAC address");
+ rc = efx_mac_addr_set(sa->nic,
+ sa->eth_dev->data->mac_addrs[0].addr_bytes);
+ if (rc != 0)
+ goto fail_mac_addr_set;
+
+ sfc_log_init(sa, "set MAC filters");
+ port->promisc = (sa->eth_dev->data->promiscuous != 0) ?
+ B_TRUE : B_FALSE;
+ port->allmulti = (sa->eth_dev->data->all_multicast != 0) ?
+ B_TRUE : B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ goto fail_mac_filter_set;
+
+ sfc_log_init(sa, "set multicast address list");
+ rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
+ port->nb_mcast_addrs);
+ if (rc != 0)
+ goto fail_mcast_address_list_set;
+
+ if (port->mac_stats_reset_pending) {
+ rc = sfc_port_reset_mac_stats(sa);
+ if (rc != 0)
+ sfc_err(sa, "statistics reset failed (requested "
+ "before the port was started)");
+
+ port->mac_stats_reset_pending = B_FALSE;
+ }
+
+ efx_mac_stats_get_mask(sa->nic, port->mac_stats_mask,
+ sizeof(port->mac_stats_mask));
+
+ port->mac_stats_update_generation = 0;
+
+ if (port->mac_stats_update_period_ms != 0) {
+ /*
+ * Update MAC stats using periodic DMA;
+ * any positive update interval different from
+ * 1000 ms can be set only on SFN8xxx provided
+ * that FW version is 6.2.1.1033 or higher
+ */
+ sfc_log_init(sa, "request MAC stats DMA'ing");
+ rc = efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ port->mac_stats_update_period_ms,
+ B_FALSE);
+ if (rc == 0) {
+ port->mac_stats_periodic_dma_supported = B_TRUE;
+ } else if (rc == EOPNOTSUPP) {
+ port->mac_stats_periodic_dma_supported = B_FALSE;
+ port->mac_stats_last_request_timestamp = 0;
+ } else {
+ goto fail_mac_stats_periodic;
+ }
+ }
+
+ sfc_log_init(sa, "disable MAC drain");
+ rc = efx_mac_drain(sa->nic, B_FALSE);
+ if (rc != 0)
+ goto fail_mac_drain;
+
+ /* Synchronize link status knowledge */
+ rc = sfc_port_init_dev_link(sa);
+ if (rc != 0)
+ goto fail_port_init_dev_link;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_port_init_dev_link:
+ (void)efx_mac_drain(sa->nic, B_TRUE);
+
+fail_mac_drain:
+ (void)efx_mac_stats_periodic(sa->nic, &port->mac_stats_dma_mem,
+ 0, B_FALSE);
+
+fail_mac_stats_periodic:
+fail_mcast_address_list_set:
+fail_mac_filter_set:
+fail_mac_addr_set:
+fail_mac_pdu_set:
+fail_phy_adv_cap_set:
+fail_mac_fcntl_set:
+ efx_port_fini(sa->nic);
+
+fail_port_init:
+ efx_filter_fini(sa->nic);
+
+fail_filter_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_stop(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+
+ efx_mac_drain(sa->nic, B_TRUE);
+
+ (void)efx_mac_stats_periodic(sa->nic, &sa->port.mac_stats_dma_mem,
+ 0, B_FALSE);
+
+ efx_port_fini(sa->nic);
+ efx_filter_fini(sa->nic);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_port_configure(struct sfc_adapter *sa)
+{
+ const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ if (dev_data->dev_conf.rxmode.jumbo_frame)
+ port->pdu = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+ else
+ port->pdu = EFX_MAC_PDU(dev_data->mtu);
+
+ return 0;
+}
+
+void
+sfc_port_close(struct sfc_adapter *sa)
+{
+ sfc_log_init(sa, "entry");
+}
+
+int
+sfc_port_attach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ long kvarg_stats_update_period_ms;
+ int rc;
+
+ sfc_log_init(sa, "entry");
+
+ efx_phy_adv_cap_get(sa->nic, EFX_PHY_CAP_PERM, &port->phy_adv_cap_mask);
+
+ /* Enable flow control by default */
+ port->flow_ctrl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
+ port->flow_ctrl_autoneg = B_TRUE;
+
+ port->max_mcast_addrs = EFX_MAC_MULTICAST_LIST_MAX;
+ port->nb_mcast_addrs = 0;
+ port->mcast_addrs = rte_calloc_socket("mcast_addr_list_buf",
+ port->max_mcast_addrs,
+ EFX_MAC_ADDR_LEN, 0,
+ sa->socket_id);
+ if (port->mcast_addrs == NULL) {
+ rc = ENOMEM;
+ goto fail_mcast_addr_list_buf_alloc;
+ }
+
+ rte_spinlock_init(&port->mac_stats_lock);
+
+ rc = ENOMEM;
+ port->mac_stats_buf = rte_calloc_socket("mac_stats_buf", EFX_MAC_NSTATS,
+ sizeof(uint64_t), 0,
+ sa->socket_id);
+ if (port->mac_stats_buf == NULL)
+ goto fail_mac_stats_buf_alloc;
+
+ rc = sfc_dma_alloc(sa, "mac_stats", 0, EFX_MAC_STATS_SIZE,
+ sa->socket_id, &port->mac_stats_dma_mem);
+ if (rc != 0)
+ goto fail_mac_stats_dma_alloc;
+
+ port->mac_stats_reset_pending = B_FALSE;
+
+ kvarg_stats_update_period_ms = SFC_MAC_STATS_UPDATE_PERIOD_MS_DEF;
+
+ rc = sfc_kvargs_process(sa, SFC_KVARG_STATS_UPDATE_PERIOD_MS,
+ sfc_kvarg_long_handler,
+ &kvarg_stats_update_period_ms);
+ if ((rc == 0) &&
+ ((kvarg_stats_update_period_ms < 0) ||
+ (kvarg_stats_update_period_ms > UINT16_MAX))) {
+ sfc_err(sa, "wrong '" SFC_KVARG_STATS_UPDATE_PERIOD_MS "' "
+ "was set (%ld);", kvarg_stats_update_period_ms);
+ sfc_err(sa, "it must not be less than 0 "
+ "or greater than %" PRIu16, UINT16_MAX);
+ rc = EINVAL;
+ goto fail_kvarg_stats_update_period_ms;
+ } else if (rc != 0) {
+ goto fail_kvarg_stats_update_period_ms;
+ }
+
+ port->mac_stats_update_period_ms = kvarg_stats_update_period_ms;
+
+ sfc_log_init(sa, "done");
+ return 0;
+
+fail_kvarg_stats_update_period_ms:
+fail_mac_stats_dma_alloc:
+ rte_free(port->mac_stats_buf);
+fail_mac_stats_buf_alloc:
+fail_mcast_addr_list_buf_alloc:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_port_detach(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+
+ sfc_log_init(sa, "entry");
+
+ sfc_dma_free(sa, &port->mac_stats_dma_mem);
+ rte_free(port->mac_stats_buf);
+
+ sfc_log_init(sa, "done");
+}
+
+int
+sfc_set_rx_mode(struct sfc_adapter *sa)
+{
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ rc = efx_mac_filter_set(sa->nic, port->promisc, B_TRUE,
+ port->promisc || port->allmulti, B_TRUE);
+
+ return rc;
+}
+
+void
+sfc_port_link_mode_to_info(efx_link_mode_t link_mode,
+ struct rte_eth_link *link_info)
+{
+ SFC_ASSERT(link_mode < EFX_LINK_NMODES);
+
+ memset(link_info, 0, sizeof(*link_info));
+ if ((link_mode == EFX_LINK_DOWN) || (link_mode == EFX_LINK_UNKNOWN))
+ link_info->link_status = ETH_LINK_DOWN;
+ else
+ link_info->link_status = ETH_LINK_UP;
+
+ switch (link_mode) {
+ case EFX_LINK_10HDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_10FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_100HDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_100FDX:
+ link_info->link_speed = ETH_SPEED_NUM_100M;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_1000HDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case EFX_LINK_1000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_1G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_10000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_10G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case EFX_LINK_40000FDX:
+ link_info->link_speed = ETH_SPEED_NUM_40G;
+ link_info->link_duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ default:
+ SFC_ASSERT(B_FALSE);
+ /* FALLTHROUGH */
+ case EFX_LINK_UNKNOWN:
+ case EFX_LINK_DOWN:
+ link_info->link_speed = ETH_SPEED_NUM_NONE;
+ link_info->link_duplex = 0;
+ break;
+ }
+
+ link_info->link_autoneg = ETH_LINK_AUTONEG;
+}
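
The mapping above is what ultimately backs the generic ethdev link query, so an application observes the EFX link mode only through struct rte_eth_link. A hedged usage sketch of reading it through the standard API; the port number is arbitrary and the printout format is an assumption.

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_link(uint16_t port_id)
{
	struct rte_eth_link link;

	/* Non-blocking read of the last known link state */
	rte_eth_link_get_nowait(port_id, &link);

	printf("port %u: %s, %u Mbps, %s duplex\n", port_id,
	       link.link_status == ETH_LINK_UP ? "up" : "down",
	       link.link_speed,
	       link.link_duplex == ETH_LINK_FULL_DUPLEX ? "full" : "half");
}
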
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
new file mode 100644
index 00000000..2ecd6f26
--- /dev/null
+++ b/drivers/net/sfc/sfc_rx.c
@@ -0,0 +1,1327 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mempool.h>
+
+#include "efx.h"
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_rx.h"
+#include "sfc_kvargs.h"
+#include "sfc_tweak.h"
+
+/*
+ * Maximum number of Rx queue flush attempt in the case of failure or
+ * flush timeout
+ */
+#define SFC_RX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for Rx
+ * queue flush done or failed events.
+ */
+#define SFC_RX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for Rx queue
+ * flush done or failed events. It defines Rx queue flush attempt timeout
+ * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
+ */
+#define SFC_RX_QFLUSH_POLL_ATTEMPTS (2000)
+
+void
+sfc_rx_qflush_done(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSHED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+void
+sfc_rx_qflush_failed(struct sfc_rxq *rxq)
+{
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ rxq->state &= ~SFC_RXQ_FLUSHING;
+}
+
+static void
+sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
+{
+ unsigned int free_space;
+ unsigned int bulks;
+ void *objs[SFC_RX_REFILL_BULK];
+ efsys_dma_addr_t addr[RTE_DIM(objs)];
+ unsigned int added = rxq->added;
+ unsigned int id;
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ uint16_t port_id = rxq->dp.dpq.port_id;
+
+ free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
+ (added - rxq->completed);
+
+ if (free_space < rxq->refill_threshold)
+ return;
+
+ bulks = free_space / RTE_DIM(objs);
+ /* refill_threshold guarantees that bulks is positive */
+ SFC_ASSERT(bulks > 0);
+
+ id = added & rxq->ptr_mask;
+ do {
+ if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
+ RTE_DIM(objs)) < 0)) {
+ /*
+ * Incrementing the counter from different contexts is
+ * hardly safe, but all PMDs do it this way.
+ */
+ rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
+ RTE_DIM(objs);
+ /* Return if we have posted nothing yet */
+ if (added == rxq->added)
+ return;
+ /* Push posted */
+ break;
+ }
+
+ for (i = 0; i < RTE_DIM(objs);
+ ++i, id = (id + 1) & rxq->ptr_mask) {
+ m = objs[i];
+
+ rxd = &rxq->sw_desc[id];
+ rxd->mbuf = m;
+
+ SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+ SFC_ASSERT(m->next == NULL);
+ SFC_ASSERT(m->nb_segs == 1);
+ m->port = port_id;
+
+ addr[i] = rte_pktmbuf_mtophys(m);
+ }
+
+ efx_rx_qpost(rxq->common, addr, rxq->buf_size,
+ RTE_DIM(objs), rxq->completed, added);
+ added += RTE_DIM(objs);
+ } while (--bulks > 0);
+
+ SFC_ASSERT(added != rxq->added);
+ rxq->added = added;
+ efx_rx_qpush(rxq->common, added, &rxq->pushed);
+}
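
The refill routine above tracks ring occupancy with free-running added/completed counters and a power-of-two pointer mask: free space is the ring limit minus the number of outstanding descriptors, and a slot index is obtained by masking the counter. A minimal sketch of that bookkeeping, with illustrative ring sizes that are not taken from the driver:

#define RING_SIZE	512u			/* must be a power of two */
#define RING_LIMIT	(RING_SIZE - 16u)	/* keep a few slots unused, in the spirit of EFX_RXQ_LIMIT() */

struct ring_counters {
	unsigned int added;	/* total descriptors ever posted */
	unsigned int completed;	/* total descriptors ever consumed */
};

/* Free space left for posting; unsigned wrap-around keeps this correct forever */
static unsigned int
ring_free_space(const struct ring_counters *rc)
{
	return RING_LIMIT - (rc->added - rc->completed);
}

/* Hardware slot index for a free-running counter (same role as rxq->ptr_mask) */
static unsigned int
ring_slot(unsigned int counter)
{
	return counter & (RING_SIZE - 1u);
}
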
+
+static uint64_t
+sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
+{
+ uint64_t mbuf_flags = 0;
+
+ switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
+ case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
+ mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case EFX_PKT_IPV4:
+ mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
+ PKT_RX_IP_CKSUM_UNKNOWN);
+ break;
+ }
+
+ switch ((desc_flags &
+ (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
+ case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
+ case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
+ mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case EFX_PKT_TCP:
+ case EFX_PKT_UDP:
+ mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
+ break;
+ default:
+ RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
+ SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
+ PKT_RX_L4_CKSUM_UNKNOWN);
+ break;
+ }
+
+ return mbuf_flags;
+}
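
This mapping exposes hardware checksum validation through the standard mbuf offload flags, so an application never looks at EFX descriptor flags directly. A hedged sketch of how a receive loop might consume the result; the port and queue numbers, burst size and drop policy are assumptions:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
poll_and_filter(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	uint16_t i;

	for (i = 0; i < nb_rx; i++) {
		struct rte_mbuf *m = pkts[i];

		/* Drop packets whose IPv4 header checksum was reported bad */
		if ((m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD) {
			rte_pktmbuf_free(m);
			continue;
		}

		/* ... hand the packet over to the application ... */
		rte_pktmbuf_free(m);
	}
}
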
+
+static uint32_t
+sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
+{
+ return RTE_PTYPE_L2_ETHER |
+ ((desc_flags & EFX_PKT_IPV4) ?
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_IPV6) ?
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
+ ((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
+ ((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
+}
+
+static const uint32_t *
+sfc_efx_supported_ptypes_get(void)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_UNKNOWN
+ };
+
+ return ptypes;
+}
+
+static void
+sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
+ struct rte_mbuf *m)
+{
+#if EFSYS_OPT_RX_SCALE
+ uint8_t *mbuf_data;
+
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
+ return;
+
+ mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
+
+ if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
+ m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
+ EFX_RX_HASHALG_TOEPLITZ,
+ mbuf_data);
+
+ m->ol_flags |= PKT_RX_RSS_HASH;
+ }
+#endif
+}
+
+static uint16_t
+sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_rxq *dp_rxq = rx_queue;
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int completed;
+ unsigned int prefix_size = rxq->prefix_size;
+ unsigned int done_pkts = 0;
+ boolean_t discard_next = B_FALSE;
+ struct rte_mbuf *scatter_pkt = NULL;
+
+ if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ completed = rxq->completed;
+ while (completed != rxq->pending && done_pkts < nb_pkts) {
+ unsigned int id;
+ struct sfc_efx_rx_sw_desc *rxd;
+ struct rte_mbuf *m;
+ unsigned int seg_len;
+ unsigned int desc_flags;
+
+ id = completed++ & rxq->ptr_mask;
+ rxd = &rxq->sw_desc[id];
+ m = rxd->mbuf;
+ desc_flags = rxd->flags;
+
+ if (discard_next)
+ goto discard;
+
+ if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
+ goto discard;
+
+ if (desc_flags & EFX_PKT_PREFIX_LEN) {
+ uint16_t tmp_size;
+ int rc __rte_unused;
+
+ rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
+ rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
+ SFC_ASSERT(rc == 0);
+ seg_len = tmp_size;
+ } else {
+ seg_len = rxd->size - prefix_size;
+ }
+
+ rte_pktmbuf_data_len(m) = seg_len;
+ rte_pktmbuf_pkt_len(m) = seg_len;
+
+ if (scatter_pkt != NULL) {
+ if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
+ rte_pktmbuf_free(scatter_pkt);
+ goto discard;
+ }
+ /* The packet to deliver */
+ m = scatter_pkt;
+ }
+
+ if (desc_flags & EFX_PKT_CONT) {
+ /* The packet is scattered, more fragments to come */
+ scatter_pkt = m;
+ /* Further fragments have no prefix */
+ prefix_size = 0;
+ continue;
+ }
+
+ /* Scattered packet is done */
+ scatter_pkt = NULL;
+ /* The first fragment of the packet has prefix */
+ prefix_size = rxq->prefix_size;
+
+ m->ol_flags =
+ sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
+ m->packet_type =
+ sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
+
+ /*
+ * Extract RSS hash from the packet prefix and
+ * set the corresponding field (if needed and possible)
+ */
+ sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
+
+ m->data_off += prefix_size;
+
+ *rx_pkts++ = m;
+ done_pkts++;
+ continue;
+
+discard:
+ discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
+ rte_mempool_put(rxq->refill_mb_pool, m);
+ rxd->mbuf = NULL;
+ }
+
+ /* pending is only moved when the entire packet is received */
+ SFC_ASSERT(scatter_pkt == NULL);
+
+ rxq->completed = completed;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ return done_pkts;
+}
+
+static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
+static unsigned int
+sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
+ return 0;
+
+ sfc_ev_qpoll(rxq->evq);
+
+ return rxq->pending - rxq->completed;
+}
+
+struct sfc_rxq *
+sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
+{
+ const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->rxq_count);
+ rxq = sa->rxq_info[dpq->queue_id].rxq;
+
+ SFC_ASSERT(rxq != NULL);
+ return rxq;
+}
+
+static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
+static int
+sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr, int socket_id,
+ const struct sfc_dp_rx_qcreate_info *info,
+ struct sfc_dp_rxq **dp_rxqp)
+{
+ struct sfc_efx_rxq *rxq;
+ int rc;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
+ info->rxq_entries,
+ sizeof(*rxq->sw_desc),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (rxq->sw_desc == NULL)
+ goto fail_desc_alloc;
+
+ /* efx datapath is bound to efx control path */
+ rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
+ if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
+ rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
+ rxq->ptr_mask = info->rxq_entries - 1;
+ rxq->batch_max = info->batch_max;
+ rxq->prefix_size = info->prefix_size;
+ rxq->refill_threshold = info->refill_threshold;
+ rxq->buf_size = info->buf_size;
+ rxq->refill_mb_pool = info->refill_mb_pool;
+
+ *dp_rxqp = &rxq->dp;
+ return 0;
+
+fail_desc_alloc:
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ return rc;
+}
+
+static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
+static void
+sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rte_free(rxq->sw_desc);
+ rte_free(rxq);
+}
+
+static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
+static int
+sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int evq_read_ptr)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->common = crxq->common;
+
+ rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
+
+ sfc_efx_rx_qrefill(rxq);
+
+ rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
+static void
+sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
+
+ /* The libefx-based datapath is bound to the libefx-based PMD and
+ * uses the event queue structure directly, so there is no need to
+ * return the EvQ read pointer.
+ */
+}
+
+static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
+static void
+sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
+{
+ struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
+ unsigned int i;
+ struct sfc_efx_rx_sw_desc *rxd;
+
+ for (i = rxq->completed; i != rxq->added; ++i) {
+ rxd = &rxq->sw_desc[i & rxq->ptr_mask];
+ rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
+ rxd->mbuf = NULL;
+ /* Packed stream relies on 0 in inactive SW desc.
+ * Rx queue stop is not performance critical, so
+ * there is no harm in doing it always.
+ */
+ rxd->flags = 0;
+ rxd->size = 0;
+ }
+
+ rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_rx sfc_efx_rx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_RX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_RX_FEAT_SCATTER,
+ .qcreate = sfc_efx_rx_qcreate,
+ .qdestroy = sfc_efx_rx_qdestroy,
+ .qstart = sfc_efx_rx_qstart,
+ .qstop = sfc_efx_rx_qstop,
+ .qpurge = sfc_efx_rx_qpurge,
+ .supported_ptypes_get = sfc_efx_supported_ptypes_get,
+ .qdesc_npending = sfc_efx_rx_qdesc_npending,
+ .pkt_burst = sfc_efx_recv_pkts,
+};
+
+unsigned int
+sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq = sa->rxq_info[sw_index].rxq;
+
+ if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
+ return 0;
+
+ return sa->dp_rx->qdesc_npending(rxq->dp);
+}
+
+int
+sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
+{
+ struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
+
+ return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
+}
+
+static void
+sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq *rxq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+
+ rxq = sa->rxq_info[sw_index].rxq;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /*
+ * Retry Rx queue flushing if the flush fails or times out. In the
+ * worst case it can delay for about 6 seconds (SFC_RX_QFLUSH_ATTEMPTS *
+ * SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS).
+ */
+ for (retry_count = 0;
+ ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ if (efx_rx_qflush(rxq->common) != 0) {
+ rxq->state |= SFC_RXQ_FLUSH_FAILED;
+ break;
+ }
+ rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
+ rxq->state |= SFC_RXQ_FLUSHING;
+
+ /*
+ * Wait for Rx queue flush done or failed event at least
+ * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+ * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
+ * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(rxq->evq);
+ } while ((rxq->state & SFC_RXQ_FLUSHING) &&
+ (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
+
+ if (rxq->state & SFC_RXQ_FLUSHING)
+ sfc_err(sa, "RxQ %u flush timed out", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSH_FAILED)
+ sfc_err(sa, "RxQ %u flush failed", sw_index);
+
+ if (rxq->state & SFC_RXQ_FLUSHED)
+ sfc_info(sa, "RxQ %u flushed", sw_index);
+ }
+
+ sa->dp_rx->qpurge(rxq->dp);
+}
+
+static int
+sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
+{
+ boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_port *port = &sa->port;
+ int rc;
+
+ /*
+ * If promiscuous or all-multicast mode has been requested, setting the
+ * filter for the default Rx queue might fail, in particular when running
+ * over a PCI function which is not a member of the corresponding
+ * privilege groups; if this occurs, a few iterations will be made to
+ * repeat this step without the promiscuous and all-multicast flags set
+ */
+retry:
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
+ if (rc == 0)
+ return 0;
+ else if (rc != EOPNOTSUPP)
+ return rc;
+
+ if (port->promisc) {
+ sfc_warn(sa, "promiscuous mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "promiscuous mode will be disabled");
+
+ port->promisc = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ if (port->allmulti) {
+ sfc_warn(sa, "all-multicast mode has been requested, "
+ "but the HW rejects it");
+ sfc_warn(sa, "all-multicast mode will be disabled");
+
+ port->allmulti = B_FALSE;
+ rc = sfc_set_rx_mode(sa);
+ if (rc != 0)
+ return rc;
+
+ goto retry;
+ }
+
+ return rc;
+}
+
+int
+sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+ struct sfc_evq *evq;
+ int rc;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ evq = rxq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries,
+ 0 /* not used on EF10 */, evq->common,
+ &rxq->common);
+ if (rc != 0)
+ goto fail_rx_qcreate;
+
+ efx_rx_qenable(rxq->common);
+
+ rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ rxq->state |= SFC_RXQ_STARTED;
+
+ if (sw_index == 0) {
+ rc = sfc_rx_default_rxq_set_filter(sa, rxq);
+ if (rc != 0)
+ goto fail_mac_filter_default_rxq_set;
+ }
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_mac_filter_default_rxq_set:
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+fail_dp_qstart:
+ sfc_rx_qflush(sa, sw_index);
+
+fail_rx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ sfc_log_init(sa, "sw_index=%u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+ rxq = rxq_info->rxq;
+
+ if (rxq->state == SFC_RXQ_INITIALIZED)
+ return;
+ SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
+
+ /* It seems to be used by DPDK for debug purposes only ('rte_ether') */
+ sa->eth_dev->data->rx_queue_state[sw_index] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+
+ sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
+
+ if (sw_index == 0)
+ efx_mac_filter_default_rxq_clear(sa->nic);
+
+ sfc_rx_qflush(sa, sw_index);
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ efx_rx_qdestroy(rxq->common);
+
+ sfc_ev_qstop(rxq->evq);
+}
+
+static int
+sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
+ const struct rte_eth_rxconf *rx_conf)
+{
+ const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
+ int rc = 0;
+
+ if (rx_conf->rx_thresh.pthresh != 0 ||
+ rx_conf->rx_thresh.hthresh != 0 ||
+ rx_conf->rx_thresh.wthresh != 0) {
+ sfc_err(sa,
+ "RxQ prefetch/host/writeback thresholds are not supported");
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
+ sfc_err(sa,
+ "RxQ free threshold too large: %u vs maximum %u",
+ rx_conf->rx_free_thresh, rx_free_thresh_max);
+ rc = EINVAL;
+ }
+
+ if (rx_conf->rx_drop_en == 0) {
+ sfc_err(sa, "RxQ drop disable is not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+static unsigned int
+sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
+{
+ uint32_t data_off;
+ uint32_t order;
+
+ /* The mbuf object itself is always cache line aligned */
+ order = rte_bsf32(RTE_CACHE_LINE_SIZE);
+
+ /* Data offset from mbuf object start */
+ data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
+ RTE_PKTMBUF_HEADROOM;
+
+ order = MIN(order, rte_bsf32(data_off));
+
+ return 1u << (order - 1);
+}
+
+static uint16_t
+sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
+ const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
+ uint16_t buf_size;
+ unsigned int buf_aligned;
+ unsigned int start_alignment;
+ unsigned int end_padding_alignment;
+
+ /* Below it is assumed that both alignments are powers of 2 */
+ SFC_ASSERT(rte_is_power_of_2(nic_align_start));
+ SFC_ASSERT(rte_is_power_of_2(nic_align_end));
+
+ /*
+ * mbuf is always cache line aligned, double-check
+ * that it meets rx buffer start alignment requirements.
+ */
+
+ /* Start from mbuf pool data room size */
+ buf_size = rte_pktmbuf_data_room_size(mb_pool);
+
+ /* Remove headroom */
+ if (buf_size <= RTE_PKTMBUF_HEADROOM) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
+ mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
+ return 0;
+ }
+ buf_size -= RTE_PKTMBUF_HEADROOM;
+
+ /* Calculate guaranteed data start alignment */
+ buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
+
+ /* Reserve space for start alignment */
+ if (buf_aligned < nic_align_start) {
+ start_alignment = nic_align_start - buf_aligned;
+ if (buf_size <= start_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment);
+ return 0;
+ }
+ buf_aligned = nic_align_start;
+ buf_size -= start_alignment;
+ } else {
+ start_alignment = 0;
+ }
+
+ /* Make sure that end padding does not write beyond the buffer */
+ if (buf_aligned < nic_align_end) {
+ /*
+ * Estimate space which can be lost. If guarnteed buffer
+ * size is odd, lost space is (nic_align_end - 1). More
+ * accurate formula is below.
+ */
+ end_padding_alignment = nic_align_end -
+ MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
+ if (buf_size <= end_padding_alignment) {
+ sfc_err(sa,
+ "RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
+ mb_pool->name,
+ rte_pktmbuf_data_room_size(mb_pool),
+ RTE_PKTMBUF_HEADROOM, start_alignment,
+ end_padding_alignment);
+ return 0;
+ }
+ buf_size -= end_padding_alignment;
+ } else {
+ /*
+ * Start is aligned the same or better than end,
+ * just align length.
+ */
+ buf_size = P2ALIGN(buf_size, nic_align_end);
+ }
+
+ return buf_size;
+}
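
The calculation above means the usable Rx buffer is the mempool data room minus the headroom and whatever start/end alignment the NIC requires, so a pool meant to receive unscattered frames needs some slack beyond the PDU. A hedged sketch of sizing such a pool for a given MTU; the 64-byte alignment allowance and the cache/priv sizes are assumptions, not values taken from the driver:

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Create an Rx mbuf pool large enough for a given MTU without Rx scatter */
static struct rte_mempool *
make_rx_pool(const char *name, unsigned int nb_mbufs, uint16_t mtu,
	     int socket_id)
{
	/* MTU plus L2 overhead, plus assumed slack for NIC buffer alignment */
	uint16_t frame_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
	uint16_t data_room = RTE_PKTMBUF_HEADROOM + frame_len + 64;

	return rte_pktmbuf_pool_create(name, nb_mbufs, 256 /* cache */,
				       0 /* priv size */, data_room, socket_id);
}
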
+
+int
+sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc;
+ uint16_t buf_size;
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_evq *evq;
+ struct sfc_rxq *rxq;
+ struct sfc_dp_rx_qcreate_info info;
+
+ rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
+ if (buf_size == 0) {
+ sfc_err(sa, "RxQ %u mbuf pool object size is too small",
+ sw_index);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
+ !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
+ sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
+ "object size is too small", sw_index);
+ sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
+ "PDU size %u plus Rx prefix %u bytes",
+ sw_index, buf_size, (unsigned int)sa->port.pdu,
+ encp->enc_rx_prefix_size);
+ rc = EINVAL;
+ goto fail_bad_conf;
+ }
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+ rxq_info = &sa->rxq_info[sw_index];
+
+ SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
+ rxq_info->entries = nb_rx_desc;
+ rxq_info->type =
+ sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
+ EFX_RXQ_TYPE_SCATTER : EFX_RXQ_TYPE_DEFAULT;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
+ rxq_info->entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (rxq == NULL)
+ goto fail_rxq_alloc;
+
+ rxq_info->rxq = rxq;
+
+ rxq->evq = evq;
+ rxq->hw_index = sw_index;
+ rxq->refill_threshold =
+ RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
+ rxq->refill_mb_pool = mb_pool;
+
+ rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
+ socket_id, &rxq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.refill_mb_pool = rxq->refill_mb_pool;
+ info.refill_threshold = rxq->refill_threshold;
+ info.buf_size = buf_size;
+ info.batch_max = encp->enc_rx_batch_max;
+ info.prefix_size = encp->enc_rx_prefix_size;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
+ info.flags |= SFC_RXQ_FLAG_RSS_HASH;
+#endif
+
+ info.rxq_entries = rxq_info->entries;
+ info.rxq_hw_ring = rxq->mem.esm_base;
+ info.evq_entries = rxq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = rxq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+
+ rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &rxq->dp);
+ if (rc != 0)
+ goto fail_dp_rx_qcreate;
+
+ evq->dp_rxq = rxq->dp;
+
+ rxq->state = SFC_RXQ_INITIALIZED;
+
+ rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_rx_qcreate:
+ sfc_dma_free(sa, &rxq->mem);
+
+fail_dma_alloc:
+ rxq_info->rxq = NULL;
+ rte_free(rxq);
+
+fail_rxq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ rxq_info->entries = 0;
+
+fail_bad_conf:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info;
+ struct sfc_rxq *rxq;
+
+ SFC_ASSERT(sw_index < sa->rxq_count);
+
+ rxq_info = &sa->rxq_info[sw_index];
+
+ rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
+
+ sa->dp_rx->qdestroy(rxq->dp);
+ rxq->dp = NULL;
+
+ rxq_info->rxq = NULL;
+ rxq_info->entries = 0;
+
+ sfc_dma_free(sa, &rxq->mem);
+
+ sfc_ev_qfini(rxq->evq);
+ rxq->evq = NULL;
+
+ rte_free(rxq);
+}
+
+#if EFSYS_OPT_RX_SCALE
+efx_rx_hash_type_t
+sfc_rte_to_efx_hash_type(uint64_t rss_hf)
+{
+ efx_rx_hash_type_t efx_hash_types = 0;
+
+ if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
+ efx_hash_types |= EFX_RX_HASH_IPV4;
+
+ if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
+ efx_hash_types |= EFX_RX_HASH_TCPIPV4;
+
+ if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
+ efx_hash_types |= EFX_RX_HASH_IPV6;
+
+ if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
+ efx_hash_types |= EFX_RX_HASH_TCPIPV6;
+
+ return efx_hash_types;
+}
+
+uint64_t
+sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
+{
+ uint64_t rss_hf = 0;
+
+ if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
+ rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER);
+
+ if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
+ rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+
+ if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
+ rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
+ ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
+
+ if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
+ rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
+
+ return rss_hf;
+}
+#endif
+
+static int
+sfc_rx_rss_config(struct sfc_adapter *sa)
+{
+ int rc = 0;
+
+#if EFSYS_OPT_RX_SCALE
+ if (sa->rss_channels > 0) {
+ rc = efx_rx_scale_mode_set(sa->nic, EFX_RX_HASHALG_TOEPLITZ,
+ sa->rss_hash_types, B_TRUE);
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_key_set(sa->nic, sa->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto finish;
+
+ rc = efx_rx_scale_tbl_set(sa->nic, sa->rss_tbl,
+ sizeof(sa->rss_tbl));
+ }
+
+finish:
+#endif
+ return rc;
+}
+
+int
+sfc_rx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ rc = efx_rx_init(sa->nic);
+ if (rc != 0)
+ goto fail_rx_init;
+
+ rc = sfc_rx_rss_config(sa);
+ if (rc != 0)
+ goto fail_rss_config;
+
+ for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
+ if ((!sa->rxq_info[sw_index].deferred_start ||
+ sa->rxq_info[sw_index].deferred_started)) {
+ rc = sfc_rx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_rx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_rx_qstart:
+ while (sw_index-- > 0)
+ sfc_rx_qstop(sa, sw_index);
+
+fail_rss_config:
+ efx_rx_fini(sa->nic);
+
+fail_rx_init:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+void
+sfc_rx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (sw_index-- > 0) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qstop(sa, sw_index);
+ }
+
+ efx_rx_fini(sa->nic);
+}
+
+static int
+sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
+ unsigned int max_entries;
+
+ max_entries = EFX_RXQ_MAXNDESCS;
+ SFC_ASSERT(rte_is_power_of_2(max_entries));
+
+ rxq_info->max_entries = max_entries;
+
+ return 0;
+}
+
+static int
+sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
+{
+ int rc = 0;
+
+ switch (rxmode->mq_mode) {
+ case ETH_MQ_RX_NONE:
+ /* No special checks are required */
+ break;
+#if EFSYS_OPT_RX_SCALE
+ case ETH_MQ_RX_RSS:
+ if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ sfc_err(sa, "RSS is not available");
+ rc = EINVAL;
+ }
+ break;
+#endif
+ default:
+ sfc_err(sa, "Rx multi-queue mode %u not supported",
+ rxmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ if (rxmode->header_split) {
+ sfc_err(sa, "Header split on Rx not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_filter) {
+ sfc_err(sa, "HW VLAN filtering not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_strip) {
+ sfc_err(sa, "HW VLAN stripping not supported");
+ rc = EINVAL;
+ }
+
+ if (rxmode->hw_vlan_extend) {
+ sfc_err(sa,
+ "Q-in-Q HW VLAN stripping not supported");
+ rc = EINVAL;
+ }
+
+ if (!rxmode->hw_strip_crc) {
+ sfc_warn(sa,
+ "FCS stripping control not supported - always stripped");
+ rxmode->hw_strip_crc = 1;
+ }
+
+ if (rxmode->enable_scatter &&
+ (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
+ sfc_err(sa, "Rx scatter not supported by %s datapath",
+ sa->dp_rx->dp.name);
+ rc = EINVAL;
+ }
+
+ if (rxmode->enable_lro) {
+ sfc_err(sa, "LRO not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
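
The checks above reject Rx features this PMD does not provide (header split, VLAN offloads, LRO) and force FCS stripping on. A hedged sketch of a device configuration consistent with them; the RSS hash field selection is an example, not a requirement:

#include <rte_ethdev.h>

/* Device configuration consistent with the Rx mode checks above (a sketch) */
static const struct rte_eth_conf port_conf_example = {
	.rxmode = {
		.mq_mode	= ETH_MQ_RX_RSS,
		.hw_strip_crc	= 1,	/* FCS is always stripped anyway */
		.enable_scatter	= 0,	/* mbufs must then cover the whole PDU */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_hf = ETH_RSS_IP | ETH_RSS_TCP,
		},
	},
};
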
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_rx_queues <= sa->rxq_count);
+
+ sw_index = sa->rxq_count;
+ while (--sw_index >= (int)nb_rx_queues) {
+ if (sa->rxq_info[sw_index].rxq != NULL)
+ sfc_rx_qfini(sa, sw_index);
+ }
+
+ sa->rxq_count = nb_rx_queues;
+}
+
+/**
+ * Initialize Rx subsystem.
+ *
+ * Called at device (re)configuration stage when number of receive queues is
+ * specified together with other device level receive configuration.
+ *
+ * It should be used to allocate NUMA-unaware resources.
+ */
+int
+sfc_rx_configure(struct sfc_adapter *sa)
+{
+ struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
+ unsigned int sw_index;
+ int rc;
+
+ sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
+ nb_rx_queues, sa->rxq_count);
+
+ rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_rx_queues == sa->rxq_count)
+ goto done;
+
+ if (sa->rxq_info == NULL) {
+ rc = ENOMEM;
+ sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
+ sizeof(sa->rxq_info[0]), 0,
+ sa->socket_id);
+ if (sa->rxq_info == NULL)
+ goto fail_rxqs_alloc;
+ } else {
+ struct sfc_rxq_info *new_rxq_info;
+
+ if (nb_rx_queues < sa->rxq_count)
+ sfc_rx_fini_queues(sa, nb_rx_queues);
+
+ rc = ENOMEM;
+ new_rxq_info =
+ rte_realloc(sa->rxq_info,
+ nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
+ if (new_rxq_info == NULL && nb_rx_queues > 0)
+ goto fail_rxqs_realloc;
+
+ sa->rxq_info = new_rxq_info;
+ if (nb_rx_queues > sa->rxq_count)
+ memset(&sa->rxq_info[sa->rxq_count], 0,
+ (nb_rx_queues - sa->rxq_count) *
+ sizeof(sa->rxq_info[0]));
+ }
+
+ while (sa->rxq_count < nb_rx_queues) {
+ rc = sfc_rx_qinit_info(sa, sa->rxq_count);
+ if (rc != 0)
+ goto fail_rx_qinit_info;
+
+ sa->rxq_count++;
+ }
+
+#if EFSYS_OPT_RX_SCALE
+ sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+
+ if (sa->rss_channels > 0) {
+ for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
+ sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
+ }
+#endif
+
+done:
+ return 0;
+
+fail_rx_qinit_info:
+fail_rxqs_realloc:
+fail_rxqs_alloc:
+ sfc_rx_close(sa);
+
+fail_check_mode:
+ sfc_log_init(sa, "failed %d", rc);
+ return rc;
+}
+
+/**
+ * Shutdown Rx subsystem.
+ *
+ * Called at device close stage, for example, before device shutdown.
+ */
+void
+sfc_rx_close(struct sfc_adapter *sa)
+{
+ sfc_rx_fini_queues(sa, 0);
+
+ sa->rss_channels = 0;
+
+ rte_free(sa->rxq_info);
+ sa->rxq_info = NULL;
+}
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
new file mode 100644
index 00000000..9e6282ea
--- /dev/null
+++ b/drivers/net/sfc/sfc_rx.h
@@ -0,0 +1,180 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_RX_H
+#define _SFC_RX_H
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc_dp_rx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Rx descriptor information associated with hardware Rx
+ * descriptor.
+ */
+struct sfc_efx_rx_sw_desc {
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+ unsigned int size;
+};
+
+/** Receive queue state bits */
+enum sfc_rxq_state_bit {
+ SFC_RXQ_INITIALIZED_BIT = 0,
+#define SFC_RXQ_INITIALIZED (1 << SFC_RXQ_INITIALIZED_BIT)
+ SFC_RXQ_STARTED_BIT,
+#define SFC_RXQ_STARTED (1 << SFC_RXQ_STARTED_BIT)
+ SFC_RXQ_FLUSHING_BIT,
+#define SFC_RXQ_FLUSHING (1 << SFC_RXQ_FLUSHING_BIT)
+ SFC_RXQ_FLUSHED_BIT,
+#define SFC_RXQ_FLUSHED (1 << SFC_RXQ_FLUSHED_BIT)
+ SFC_RXQ_FLUSH_FAILED_BIT,
+#define SFC_RXQ_FLUSH_FAILED (1 << SFC_RXQ_FLUSH_FAILED_BIT)
+};
+
+/**
+ * Receive queue control information.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_rxq {
+ struct sfc_evq *evq;
+ efx_rxq_t *common;
+ efsys_mem_t mem;
+ unsigned int hw_index;
+ unsigned int refill_threshold;
+ struct rte_mempool *refill_mb_pool;
+ struct sfc_dp_rxq *dp;
+ unsigned int state;
+};
+
+static inline unsigned int
+sfc_rxq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_rxq_sw_index(const struct sfc_rxq *rxq)
+{
+ return sfc_rxq_sw_index_by_hw_index(rxq->hw_index);
+}
+
+struct sfc_rxq *sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq);
+
+/**
+ * Receive queue information used on libefx-based data path.
+ * Allocated on the socket specified on the queue setup.
+ */
+struct sfc_efx_rxq {
+ /* Used on data path */
+ struct sfc_evq *evq;
+ unsigned int flags;
+#define SFC_EFX_RXQ_FLAG_STARTED 0x1
+#define SFC_EFX_RXQ_FLAG_RUNNING 0x2
+#define SFC_EFX_RXQ_FLAG_RSS_HASH 0x4
+ unsigned int ptr_mask;
+ unsigned int pending;
+ unsigned int completed;
+ uint16_t batch_max;
+ uint16_t prefix_size;
+ struct sfc_efx_rx_sw_desc *sw_desc;
+
+ /* Used on refill */
+ unsigned int added;
+ unsigned int pushed;
+ unsigned int refill_threshold;
+ uint16_t buf_size;
+ struct rte_mempool *refill_mb_pool;
+ efx_rxq_t *common;
+
+ /* Datapath receive queue anchor */
+ struct sfc_dp_rxq dp;
+};
+
+static inline struct sfc_efx_rxq *
+sfc_efx_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
+{
+ return container_of(dp_rxq, struct sfc_efx_rxq, dp);
+}
+
+/**
+ * Receive queue information used during setup/release only.
+ * Allocated on the same socket as adapter data.
+ */
+struct sfc_rxq_info {
+ unsigned int max_entries;
+ unsigned int entries;
+ efx_rxq_type_t type;
+ struct sfc_rxq *rxq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_rx_configure(struct sfc_adapter *sa);
+void sfc_rx_close(struct sfc_adapter *sa);
+int sfc_rx_start(struct sfc_adapter *sa);
+void sfc_rx_stop(struct sfc_adapter *sa);
+
+int sfc_rx_qinit(struct sfc_adapter *sa, unsigned int rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_rx_qflush_done(struct sfc_rxq *rxq);
+void sfc_rx_qflush_failed(struct sfc_rxq *rxq);
+
+unsigned int sfc_rx_qdesc_npending(struct sfc_adapter *sa,
+ unsigned int sw_index);
+int sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset);
+
+#if EFSYS_OPT_RX_SCALE
+efx_rx_hash_type_t sfc_rte_to_efx_hash_type(uint64_t rss_hf);
+uint64_t sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_RX_H */
diff --git a/drivers/net/sfc/sfc_tso.c b/drivers/net/sfc/sfc_tso.c
new file mode 100644
index 00000000..fb79d749
--- /dev/null
+++ b/drivers/net/sfc/sfc_tso.c
@@ -0,0 +1,201 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ip.h>
+#include <rte_tcp.h>
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_tx.h"
+#include "sfc_ev.h"
+
+/** Standard TSO header length */
+#define SFC_TSOH_STD_LEN 256
+
+/** The number of TSO option descriptors that precede the packet descriptors */
+#define SFC_TSO_OPDESCS_IDX_SHIFT 2
+
+int
+sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries, unsigned int socket_id)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ sw_ring[i].tsoh = rte_malloc_socket("sfc-efx-txq-tsoh-obj",
+ SFC_TSOH_STD_LEN,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (sw_ring[i].tsoh == NULL)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ return 0;
+
+fail_alloc_tsoh_objs:
+ while (i > 0)
+ rte_free(sw_ring[--i].tsoh);
+
+ return ENOMEM;
+}
+
+void
+sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries)
+{
+ unsigned int i;
+
+ for (i = 0; i < txq_entries; ++i) {
+ rte_free(sw_ring[i].tsoh);
+ sw_ring[i].tsoh = NULL;
+ }
+}
+
+static void
+sfc_efx_tso_prepare_header(struct sfc_efx_txq *txq, struct rte_mbuf **in_seg,
+ size_t *in_off, unsigned int idx, size_t bytes_left)
+{
+ struct rte_mbuf *m = *in_seg;
+ size_t bytes_to_copy = 0;
+ uint8_t *tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ do {
+ bytes_to_copy = MIN(bytes_left, m->data_len);
+
+ rte_memcpy(tsoh, rte_pktmbuf_mtod(m, uint8_t *),
+ bytes_to_copy);
+
+ bytes_left -= bytes_to_copy;
+ tsoh += bytes_to_copy;
+
+ if (bytes_left > 0) {
+ m = m->next;
+ SFC_ASSERT(m != NULL);
+ }
+ } while (bytes_left > 0);
+
+ if (bytes_to_copy == m->data_len) {
+ *in_seg = m->next;
+ *in_off = 0;
+ } else {
+ *in_seg = m;
+ *in_off = bytes_to_copy;
+ }
+}
+
+int
+sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len)
+{
+ uint8_t *tsoh;
+ const struct tcp_hdr *th;
+ efsys_dma_addr_t header_paddr;
+ uint16_t packet_id;
+ uint32_t sent_seq;
+ struct rte_mbuf *m = *in_seg;
+ size_t nh_off = m->l2_len; /* IP header offset */
+ size_t tcph_off = m->l2_len + m->l3_len; /* TCP header offset */
+ size_t header_len = m->l2_len + m->l3_len + m->l4_len;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(txq->evq->sa->nic);
+
+ idx += SFC_TSO_OPDESCS_IDX_SHIFT;
+
+ /* Packets whose headers are too big should be discarded */
+ if (unlikely(header_len > SFC_TSOH_STD_LEN))
+ return EMSGSIZE;
+
+ /*
+ * The TCP header must start at most 208 bytes into the frame.
+ * If it starts later than this then the NIC won't realise
+ * it's a TCP packet and TSO edits won't be applied
+ */
+ if (unlikely(tcph_off > encp->enc_tx_tso_tcp_header_offset_limit))
+ return EMSGSIZE;
+
+ header_paddr = rte_pktmbuf_mtophys(m);
+
+ /*
+ * Sometimes headers may be split across multiple mbufs. In such cases
+ * we need to glue those pieces and store them in some temporary place.
+ * Also, packet headers must be contiguous in memory, so that
+ * they can be referred to with a single DMA descriptor. EF10 places
+ * no restrictions on DMA descriptor data crossing address boundaries.
+ */
+ if (m->data_len < header_len) {
+ sfc_efx_tso_prepare_header(txq, in_seg, in_off, idx,
+ header_len);
+ tsoh = txq->sw_ring[idx & txq->ptr_mask].tsoh;
+
+ header_paddr = rte_malloc_virt2phy((void *)tsoh);
+ } else {
+ if (m->data_len == header_len) {
+ *in_off = 0;
+ *in_seg = m->next;
+ } else {
+ *in_off = header_len;
+ }
+
+ tsoh = rte_pktmbuf_mtod(m, uint8_t *);
+ }
+
+ /* Handle IP header */
+ if (m->ol_flags & PKT_TX_IPV4) {
+ const struct ipv4_hdr *iphe4;
+
+ iphe4 = (const struct ipv4_hdr *)(tsoh + nh_off);
+ rte_memcpy(&packet_id, &iphe4->packet_id, sizeof(uint16_t));
+ packet_id = rte_be_to_cpu_16(packet_id);
+ } else if (m->ol_flags & PKT_TX_IPV6) {
+ packet_id = 0;
+ } else {
+ return EINVAL;
+ }
+
+ /* Handle TCP header */
+ th = (const struct tcp_hdr *)(tsoh + tcph_off);
+
+ rte_memcpy(&sent_seq, &th->sent_seq, sizeof(uint32_t));
+ sent_seq = rte_be_to_cpu_32(sent_seq);
+
+ efx_tx_qdesc_tso2_create(txq->common, packet_id, sent_seq, m->tso_segsz,
+ *pend, EFX_TX_FATSOV2_OPT_NDESCS);
+
+ *pend += EFX_TX_FATSOV2_OPT_NDESCS;
+ *pkt_descs += EFX_TX_FATSOV2_OPT_NDESCS;
+
+ efx_tx_qdesc_dma_create(txq->common, header_paddr, header_len,
+ B_FALSE, (*pend)++);
+ (*pkt_descs)++;
+ *pkt_len -= header_len;
+
+ return 0;
+}
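
The TSO path above takes the header layout from the mbuf's l2_len/l3_len/l4_len fields and the segment size from tso_segsz, so the transmitting application has to fill these in along with the TSO offload flags before calling the Tx burst. A hedged sketch for an already-built IPv4/TCP packet; the header sizes shown assume no IP or TCP options and the MSS value is arbitrary:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_tcp.h>

/* Mark an already-built IPv4/TCP packet for TSO with a 1460-byte MSS */
static void
request_tso(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct ether_hdr);	/* 14 bytes */
	m->l3_len = sizeof(struct ipv4_hdr);	/* 20 bytes, no options */
	m->l4_len = sizeof(struct tcp_hdr);	/* 20 bytes, no options */
	m->tso_segsz = 1460;

	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
		       PKT_TX_TCP_CKSUM | PKT_TX_TCP_SEG;
}
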
diff --git a/drivers/net/sfc/sfc_tweak.h b/drivers/net/sfc/sfc_tweak.h
new file mode 100644
index 00000000..4ef7fc8b
--- /dev/null
+++ b/drivers/net/sfc/sfc_tweak.h
@@ -0,0 +1,56 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_TWEAK_H_
+#define _SFC_TWEAK_H_
+
+/*
+ * This header collects defines/constants which can be tweaked to
+ * improve PMD performance characteristics depending on the use case
+ * or requirements (CPU load, packet rate, latency).
+ */
+
+/**
+ * Number of Rx descriptors in the bulk submitted on Rx ring refill
+ * (e.g. 8 descriptors with a 64-byte cache line and 8-byte efx_qword_t).
+ */
+#define SFC_RX_REFILL_BULK (RTE_CACHE_LINE_SIZE / sizeof(efx_qword_t))
+
+/**
+ * Make the transmit path reap at least once per burst; this improves
+ * cache locality because, with well-timed reaping, the same mbufs may be
+ * reused to send subsequent bursts.
+ */
+#define SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE 0
+
+/** Default free threshold follows recommendations from DPDK documentation */
+#define SFC_TX_DEFAULT_FREE_THRESH 32
+
+#endif /* _SFC_TWEAK_H_ */
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
new file mode 100644
index 00000000..b8581d14
--- /dev/null
+++ b/drivers/net/sfc/sfc_tx.c
@@ -0,0 +1,992 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "sfc.h"
+#include "sfc_debug.h"
+#include "sfc_log.h"
+#include "sfc_ev.h"
+#include "sfc_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+
+/*
+ * Maximum number of TX queue flush attempts in case of
+ * failure or flush timeout
+ */
+#define SFC_TX_QFLUSH_ATTEMPTS (3)
+
+/*
+ * Time to wait between event queue polling attempts when waiting for TX
+ * queue flush done or flush failed events
+ */
+#define SFC_TX_QFLUSH_POLL_WAIT_MS (1)
+
+/*
+ * Maximum number of event queue polling attempts when waiting for TX queue
+ * flush done or flush failed events; it defines TX queue flush attempt timeout
+ * together with SFC_TX_QFLUSH_POLL_WAIT_MS
+ */
+#define SFC_TX_QFLUSH_POLL_ATTEMPTS (2000)
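+
+/*
+ * Worst-case flush delay implied by the constants above:
+ * SFC_TX_QFLUSH_ATTEMPTS * SFC_TX_QFLUSH_POLL_ATTEMPTS *
+ * SFC_TX_QFLUSH_POLL_WAIT_MS = 3 * 2000 * 1 ms = 6 seconds.
+ */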
+
+static int
+sfc_tx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_tx_desc,
+ const struct rte_eth_txconf *tx_conf)
+{
+ unsigned int flags = tx_conf->txq_flags;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ int rc = 0;
+
+ if (tx_conf->tx_rs_thresh != 0) {
+ sfc_err(sa, "RS bit in transmit descriptor is not supported");
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_free_thresh > EFX_TXQ_LIMIT(nb_tx_desc)) {
+ sfc_err(sa,
+ "TxQ free threshold too large: %u vs maximum %u",
+ tx_conf->tx_free_thresh, EFX_TXQ_LIMIT(nb_tx_desc));
+ rc = EINVAL;
+ }
+
+ if (tx_conf->tx_thresh.pthresh != 0 ||
+ tx_conf->tx_thresh.hthresh != 0 ||
+ tx_conf->tx_thresh.wthresh != 0) {
+ sfc_err(sa,
+ "prefetch/host/writeback thresholds are not supported");
+ rc = EINVAL;
+ }
+
+ if (((flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) &&
+ (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)) {
+ sfc_err(sa, "Multi-segment is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+
+ if ((flags & ETH_TXQ_FLAGS_NOVLANOFFL) == 0) {
+ if (!encp->enc_hw_tx_insert_vlan_enabled) {
+ sfc_err(sa, "VLAN offload is not supported");
+ rc = EINVAL;
+ } else if (~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) {
+ sfc_err(sa,
+ "VLAN offload is not supported by %s datapath",
+ sa->dp_tx->dp.name);
+ rc = EINVAL;
+ }
+ }
+
+ if ((flags & ETH_TXQ_FLAGS_NOXSUMSCTP) == 0) {
+ sfc_err(sa, "SCTP offload is not supported");
+ rc = EINVAL;
+ }
+
+ /* We either perform both TCP and UDP offload, or no offload at all */
+ if (((flags & ETH_TXQ_FLAGS_NOXSUMTCP) == 0) !=
+ ((flags & ETH_TXQ_FLAGS_NOXSUMUDP) == 0)) {
+ sfc_err(sa, "TCP and UDP offloads can't be set independently");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+void
+sfc_tx_qflush_done(struct sfc_txq *txq)
+{
+ txq->state |= SFC_TXQ_FLUSHED;
+ txq->state &= ~SFC_TXQ_FLUSHING;
+}
+
+int
+sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_txq_info *txq_info;
+ struct sfc_evq *evq;
+ struct sfc_txq *txq;
+ int rc = 0;
+ struct sfc_dp_tx_qcreate_info info;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ rc = sfc_tx_qcheck_conf(sa, nb_tx_desc, tx_conf);
+ if (rc != 0)
+ goto fail_bad_conf;
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ SFC_ASSERT(nb_tx_desc <= sa->txq_max_entries);
+ txq_info->entries = nb_tx_desc;
+
+ rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_TX, sw_index,
+ txq_info->entries, socket_id, &evq);
+ if (rc != 0)
+ goto fail_ev_qinit;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-txq", sizeof(*txq), 0, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ txq_info->txq = txq;
+
+ txq->hw_index = sw_index;
+ txq->evq = evq;
+ txq->free_thresh =
+ (tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
+ SFC_TX_DEFAULT_FREE_THRESH;
+ txq->flags = tx_conf->txq_flags;
+
+ rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
+ socket_id, &txq->mem);
+ if (rc != 0)
+ goto fail_dma_alloc;
+
+ memset(&info, 0, sizeof(info));
+ info.free_thresh = txq->free_thresh;
+ info.flags = tx_conf->txq_flags;
+ info.txq_entries = txq_info->entries;
+ info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
+ info.txq_hw_ring = txq->mem.esm_base;
+ info.evq_entries = txq_info->entries;
+ info.evq_hw_ring = evq->mem.esm_base;
+ info.hw_index = txq->hw_index;
+ info.mem_bar = sa->mem_bar.esb_base;
+
+ rc = sa->dp_tx->qcreate(sa->eth_dev->data->port_id, sw_index,
+ &SFC_DEV_TO_PCI(sa->eth_dev)->addr,
+ socket_id, &info, &txq->dp);
+ if (rc != 0)
+ goto fail_dp_tx_qinit;
+
+ evq->dp_txq = txq->dp;
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ txq_info->deferred_start = (tx_conf->tx_deferred_start != 0);
+
+ return 0;
+
+fail_dp_tx_qinit:
+ sfc_dma_free(sa, &txq->mem);
+
+fail_dma_alloc:
+ txq_info->txq = NULL;
+ rte_free(txq);
+
+fail_txq_alloc:
+ sfc_ev_qfini(evq);
+
+fail_ev_qinit:
+ txq_info->entries = 0;
+
+fail_bad_conf:
+ sfc_log_init(sa, "failed (TxQ = %u, rc = %d)", sw_index, rc);
+ return rc;
+}
+
+void
+sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+ SFC_ASSERT(txq != NULL);
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ sa->dp_tx->qdestroy(txq->dp);
+ txq->dp = NULL;
+
+ txq_info->txq = NULL;
+ txq_info->entries = 0;
+
+ sfc_dma_free(sa, &txq->mem);
+
+ sfc_ev_qfini(txq->evq);
+ txq->evq = NULL;
+
+ rte_free(txq);
+}
+
+static int
+sfc_tx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ return 0;
+}
+
+static int
+sfc_tx_check_mode(struct sfc_adapter *sa, const struct rte_eth_txmode *txmode)
+{
+ int rc = 0;
+
+ switch (txmode->mq_mode) {
+ case ETH_MQ_TX_NONE:
+ break;
+ default:
+ sfc_err(sa, "Tx multi-queue mode %u not supported",
+ txmode->mq_mode);
+ rc = EINVAL;
+ }
+
+ /*
+ * These features are claimed to be i40e-specific,
+	 * but it makes sense to double-check that they are not requested
+ */
+ if (txmode->hw_vlan_reject_tagged) {
+ sfc_err(sa, "Rejecting tagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_reject_untagged) {
+ sfc_err(sa, "Rejecting untagged packets not supported");
+ rc = EINVAL;
+ }
+
+ if (txmode->hw_vlan_insert_pvid) {
+ sfc_err(sa, "Port-based VLAN insertion not supported");
+ rc = EINVAL;
+ }
+
+ return rc;
+}
+
+/**
+ * Destroy excess queues that are no longer needed after reconfiguration
+ * or complete close.
+ */
+static void
+sfc_tx_fini_queues(struct sfc_adapter *sa, unsigned int nb_tx_queues)
+{
+ int sw_index;
+
+ SFC_ASSERT(nb_tx_queues <= sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (--sw_index >= (int)nb_tx_queues) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qfini(sa, sw_index);
+ }
+
+ sa->txq_count = nb_tx_queues;
+}
+
+int
+sfc_tx_configure(struct sfc_adapter *sa)
+{
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ const struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
+ const unsigned int nb_tx_queues = sa->eth_dev->data->nb_tx_queues;
+ int rc = 0;
+
+ sfc_log_init(sa, "nb_tx_queues=%u (old %u)",
+ nb_tx_queues, sa->txq_count);
+
+ /*
+	 * The datapath implementation assumes that there are no boundary
+	 * limits on Tx DMA descriptors. Adding such checks to the datapath
+	 * would simply make it slower.
+ */
+ if (encp->enc_tx_dma_desc_boundary != 0) {
+ rc = ENOTSUP;
+ goto fail_tx_dma_desc_boundary;
+ }
+
+ rc = sfc_tx_check_mode(sa, &dev_conf->txmode);
+ if (rc != 0)
+ goto fail_check_mode;
+
+ if (nb_tx_queues == sa->txq_count)
+ goto done;
+
+ if (sa->txq_info == NULL) {
+ sa->txq_info = rte_calloc_socket("sfc-txqs", nb_tx_queues,
+ sizeof(sa->txq_info[0]), 0,
+ sa->socket_id);
+ if (sa->txq_info == NULL)
+ goto fail_txqs_alloc;
+ } else {
+ struct sfc_txq_info *new_txq_info;
+
+ if (nb_tx_queues < sa->txq_count)
+ sfc_tx_fini_queues(sa, nb_tx_queues);
+
+ new_txq_info =
+ rte_realloc(sa->txq_info,
+ nb_tx_queues * sizeof(sa->txq_info[0]), 0);
+ if (new_txq_info == NULL && nb_tx_queues > 0)
+ goto fail_txqs_realloc;
+
+ sa->txq_info = new_txq_info;
+ if (nb_tx_queues > sa->txq_count)
+ memset(&sa->txq_info[sa->txq_count], 0,
+ (nb_tx_queues - sa->txq_count) *
+ sizeof(sa->txq_info[0]));
+ }
+
+ while (sa->txq_count < nb_tx_queues) {
+ rc = sfc_tx_qinit_info(sa, sa->txq_count);
+ if (rc != 0)
+ goto fail_tx_qinit_info;
+
+ sa->txq_count++;
+ }
+
+done:
+ return 0;
+
+fail_tx_qinit_info:
+fail_txqs_realloc:
+fail_txqs_alloc:
+ sfc_tx_close(sa);
+
+fail_check_mode:
+fail_tx_dma_desc_boundary:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_close(struct sfc_adapter *sa)
+{
+ sfc_tx_fini_queues(sa, 0);
+
+ rte_free(sa->txq_info);
+ sa->txq_info = NULL;
+}
+
+int
+sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ struct sfc_evq *evq;
+ uint16_t flags;
+ unsigned int desc_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ SFC_ASSERT(txq->state == SFC_TXQ_INITIALIZED);
+
+ evq = txq->evq;
+
+ rc = sfc_ev_qstart(evq, sfc_evq_index_by_txq_sw_index(sa, sw_index));
+ if (rc != 0)
+ goto fail_ev_qstart;
+
+ /*
+	 * DPDK seems to provide no control over IPv4 checksum offload,
+	 * hence it is always enabled here
+ */
+ if ((txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP) ||
+ (txq->flags & ETH_TXQ_FLAGS_NOXSUMUDP)) {
+ flags = EFX_TXQ_CKSUM_IPV4;
+ } else {
+ flags = EFX_TXQ_CKSUM_IPV4 | EFX_TXQ_CKSUM_TCPUDP;
+
+ if (sa->tso)
+ flags |= EFX_TXQ_FATSOV2;
+ }
+
+ rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
+ txq_info->entries, 0 /* not used on EF10 */,
+ flags, evq->common,
+ &txq->common, &desc_index);
+ if (rc != 0) {
+ if (sa->tso && (rc == ENOSPC))
+ sfc_err(sa, "ran out of TSO contexts");
+
+ goto fail_tx_qcreate;
+ }
+
+ efx_tx_qenable(txq->common);
+
+ txq->state |= SFC_TXQ_STARTED;
+
+ rc = sa->dp_tx->qstart(txq->dp, evq->read_ptr, desc_index);
+ if (rc != 0)
+ goto fail_dp_qstart;
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STARTED;
+
+ return 0;
+
+fail_dp_qstart:
+ txq->state = SFC_TXQ_INITIALIZED;
+ efx_tx_qdestroy(txq->common);
+
+fail_tx_qcreate:
+ sfc_ev_qstop(evq);
+
+fail_ev_qstart:
+ return rc;
+}
+
+void
+sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
+{
+ struct rte_eth_dev_data *dev_data;
+ struct sfc_txq_info *txq_info;
+ struct sfc_txq *txq;
+ unsigned int retry_count;
+ unsigned int wait_count;
+
+ sfc_log_init(sa, "TxQ = %u", sw_index);
+
+ SFC_ASSERT(sw_index < sa->txq_count);
+ txq_info = &sa->txq_info[sw_index];
+
+ txq = txq_info->txq;
+
+ if (txq->state == SFC_TXQ_INITIALIZED)
+ return;
+
+ SFC_ASSERT(txq->state & SFC_TXQ_STARTED);
+
+ sa->dp_tx->qstop(txq->dp, &txq->evq->read_ptr);
+
+ /*
+	 * Retry TX queue flushing if the flush fails or times out;
+	 * in the worst case this can delay for 6 seconds
+ */
+ for (retry_count = 0;
+ ((txq->state & SFC_TXQ_FLUSHED) == 0) &&
+ (retry_count < SFC_TX_QFLUSH_ATTEMPTS);
+ ++retry_count) {
+ if (efx_tx_qflush(txq->common) != 0) {
+ txq->state |= SFC_TXQ_FLUSHING;
+ break;
+ }
+
+ /*
+		 * Wait for a TX queue flush done or flush failed event for at
+		 * least SFC_TX_QFLUSH_POLL_WAIT_MS milliseconds and not more
+		 * than 2 seconds (SFC_TX_QFLUSH_POLL_WAIT_MS multiplied
+		 * by SFC_TX_QFLUSH_POLL_ATTEMPTS)
+ */
+ wait_count = 0;
+ do {
+ rte_delay_ms(SFC_TX_QFLUSH_POLL_WAIT_MS);
+ sfc_ev_qpoll(txq->evq);
+ } while ((txq->state & SFC_TXQ_FLUSHING) &&
+ wait_count++ < SFC_TX_QFLUSH_POLL_ATTEMPTS);
+
+ if (txq->state & SFC_TXQ_FLUSHING)
+ sfc_err(sa, "TxQ %u flush timed out", sw_index);
+
+ if (txq->state & SFC_TXQ_FLUSHED)
+ sfc_info(sa, "TxQ %u flushed", sw_index);
+ }
+
+ sa->dp_tx->qreap(txq->dp);
+
+ txq->state = SFC_TXQ_INITIALIZED;
+
+ efx_tx_qdestroy(txq->common);
+
+ sfc_ev_qstop(txq->evq);
+
+ /*
+ * It seems to be used by DPDK for debug purposes only ('rte_ether')
+ */
+ dev_data = sa->eth_dev->data;
+ dev_data->tx_queue_state[sw_index] = RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+int
+sfc_tx_start(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+ int rc = 0;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ if (sa->tso) {
+ if (!efx_nic_cfg_get(sa->nic)->enc_fw_assisted_tso_v2_enabled) {
+			sfc_warn(sa, "TSO support could not be restored");
+ sa->tso = B_FALSE;
+ }
+ }
+
+ rc = efx_tx_init(sa->nic);
+ if (rc != 0)
+ goto fail_efx_tx_init;
+
+ for (sw_index = 0; sw_index < sa->txq_count; ++sw_index) {
+ if (!(sa->txq_info[sw_index].deferred_start) ||
+ sa->txq_info[sw_index].deferred_started) {
+ rc = sfc_tx_qstart(sa, sw_index);
+ if (rc != 0)
+ goto fail_tx_qstart;
+ }
+ }
+
+ return 0;
+
+fail_tx_qstart:
+ while (sw_index-- > 0)
+ sfc_tx_qstop(sa, sw_index);
+
+ efx_tx_fini(sa->nic);
+
+fail_efx_tx_init:
+ sfc_log_init(sa, "failed (rc = %d)", rc);
+ return rc;
+}
+
+void
+sfc_tx_stop(struct sfc_adapter *sa)
+{
+ unsigned int sw_index;
+
+ sfc_log_init(sa, "txq_count = %u", sa->txq_count);
+
+ sw_index = sa->txq_count;
+ while (sw_index-- > 0) {
+ if (sa->txq_info[sw_index].txq != NULL)
+ sfc_tx_qstop(sa, sw_index);
+ }
+
+ efx_tx_fini(sa->nic);
+}
+
+static void
+sfc_efx_tx_reap(struct sfc_efx_txq *txq)
+{
+ unsigned int completed;
+
+ sfc_ev_qpoll(txq->evq);
+
+ for (completed = txq->completed;
+ completed != txq->pending; completed++) {
+ struct sfc_efx_tx_sw_desc *txd;
+
+ txd = &txq->sw_ring[completed & txq->ptr_mask];
+
+ if (txd->mbuf != NULL) {
+ rte_pktmbuf_free(txd->mbuf);
+ txd->mbuf = NULL;
+ }
+ }
+
+ txq->completed = completed;
+}
+
+/*
+ * The function is used to insert or update a VLAN tag;
+ * the firmware keeps per-TxQ state of the tag to insert
+ * (controlled by option descriptors), hence, if the tag of the
+ * packet to be sent differs from the one remembered by the firmware,
+ * the function updates it
+ */
+static unsigned int
+sfc_efx_tx_maybe_insert_tag(struct sfc_efx_txq *txq, struct rte_mbuf *m,
+ efx_desc_t **pend)
+{
+ uint16_t this_tag = ((m->ol_flags & PKT_TX_VLAN_PKT) ?
+ m->vlan_tci : 0);
+
+ if (this_tag == txq->hw_vlan_tci)
+ return 0;
+
+ /*
+	 * The expression inside SFC_ASSERT() is deliberately not checked in
+	 * a non-debug build because it might be too expensive on the data path
+ */
+ SFC_ASSERT(efx_nic_cfg_get(txq->evq->sa->nic)->enc_hw_tx_insert_vlan_enabled);
+
+ efx_tx_qdesc_vlantci_create(txq->common, rte_cpu_to_be_16(this_tag),
+ *pend);
+ (*pend)++;
+ txq->hw_vlan_tci = this_tag;
+
+ return 1;
+}
+
+static uint16_t
+sfc_efx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct sfc_dp_txq *dp_txq = (struct sfc_dp_txq *)tx_queue;
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int added = txq->added;
+ unsigned int pushed = added;
+ unsigned int pkts_sent = 0;
+ efx_desc_t *pend = &txq->pend_desc[0];
+ const unsigned int hard_max_fill = EFX_TXQ_LIMIT(txq->ptr_mask + 1);
+ const unsigned int soft_max_fill = hard_max_fill - txq->free_thresh;
+ unsigned int fill_level = added - txq->completed;
+ boolean_t reap_done;
+ int rc __rte_unused;
+ struct rte_mbuf **pktp;
+
+ if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_RUNNING) == 0))
+ goto done;
+
+ /*
+	 * Reap only when there may be insufficient space for a single
+	 * packet; reaping on every burst would increase latency
+ */
+ reap_done = (fill_level > soft_max_fill);
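+	/*
+	 * Illustrative example: with hard_max_fill = 1008 and
+	 * free_thresh = 32, soft_max_fill is 976, so reaping is triggered
+	 * only once more than 976 descriptors are in flight.
+	 */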
+
+ if (reap_done) {
+ sfc_efx_tx_reap(txq);
+ /*
+ * Recalculate fill level since 'txq->completed'
+ * might have changed on reap
+ */
+ fill_level = added - txq->completed;
+ }
+
+ for (pkts_sent = 0, pktp = &tx_pkts[0];
+ (pkts_sent < nb_pkts) && (fill_level <= soft_max_fill);
+ pkts_sent++, pktp++) {
+ struct rte_mbuf *m_seg = *pktp;
+ size_t pkt_len = m_seg->pkt_len;
+ unsigned int pkt_descs = 0;
+ size_t in_off = 0;
+
+ /*
+		 * Here the VLAN TCI is expected to be zero if no
+		 * DEV_TX_VLAN_OFFLOAD capability is advertised;
+		 * if the calling app ignores the absence of
+		 * DEV_TX_VLAN_OFFLOAD and sets a VLAN TCI anyway, then
+		 * TX_ERROR will occur
+ */
+ pkt_descs += sfc_efx_tx_maybe_insert_tag(txq, m_seg, &pend);
+
+ if (m_seg->ol_flags & PKT_TX_TCP_SEG) {
+ /*
+			 * We expect the caller to have set the
+			 * 'pkt->l[2, 3, 4]_len' values correctly
+ */
+ if (sfc_efx_tso_do(txq, added, &m_seg, &in_off, &pend,
+ &pkt_descs, &pkt_len) != 0) {
+				/* We may have reached this place for
+				 * one of the following reasons:
+				 *
+				 * 1) Packet header length is greater
+				 *    than SFC_TSOH_STD_LEN
+				 * 2) TCP header starts more than
+				 *    208 bytes into the frame
+				 *
+				 * We will report the packet to RTE as sent,
+				 * but actually drop it. Hence, we should
+				 * revert 'pend' to its previous state (in
+				 * case a VLAN descriptor has been added) and
+				 * move on to the next packet. The original
+				 * mbuf must be freed rather than orphaned
+				 */
+ pend -= pkt_descs;
+
+ rte_pktmbuf_free(*pktp);
+
+ continue;
+ }
+
+ /*
+ * We've only added 2 FATSOv2 option descriptors
+ * and 1 descriptor for the linearized packet header.
+ * The outstanding work will be done in the same manner
+ * as for the usual non-TSO path
+ */
+ }
+
+ for (; m_seg != NULL; m_seg = m_seg->next) {
+ efsys_dma_addr_t next_frag;
+ size_t seg_len;
+
+ seg_len = m_seg->data_len;
+ next_frag = rte_mbuf_data_dma_addr(m_seg);
+
+ /*
+			 * If we've started a TSO transaction a few steps
+			 * earlier, we'll skip the packet header using an
+			 * offset into the current segment (which has been
+			 * set to the first one containing payload)
+ */
+ seg_len -= in_off;
+ next_frag += in_off;
+ in_off = 0;
+
+ do {
+ efsys_dma_addr_t frag_addr = next_frag;
+ size_t frag_len;
+
+ /*
+ * It is assumed here that there is no
+ * limitation on address boundary
+ * crossing by DMA descriptor.
+ */
+ frag_len = MIN(seg_len, txq->dma_desc_size_max);
+ next_frag += frag_len;
+ seg_len -= frag_len;
+ pkt_len -= frag_len;
+
+ efx_tx_qdesc_dma_create(txq->common,
+ frag_addr, frag_len,
+ (pkt_len == 0),
+ pend++);
+
+ pkt_descs++;
+ } while (seg_len != 0);
+ }
+
+ added += pkt_descs;
+
+ fill_level += pkt_descs;
+ if (unlikely(fill_level > hard_max_fill)) {
+ /*
+			 * Our estimate of the maximum number of descriptors
+			 * required to send a packet seems to be wrong.
+ * Try to reap (if we haven't yet).
+ */
+ if (!reap_done) {
+ sfc_efx_tx_reap(txq);
+ reap_done = B_TRUE;
+ fill_level = added - txq->completed;
+ if (fill_level > hard_max_fill) {
+ pend -= pkt_descs;
+ break;
+ }
+ } else {
+ pend -= pkt_descs;
+ break;
+ }
+ }
+
+ /* Assign mbuf to the last used desc */
+ txq->sw_ring[(added - 1) & txq->ptr_mask].mbuf = *pktp;
+ }
+
+ if (likely(pkts_sent > 0)) {
+ rc = efx_tx_qdesc_post(txq->common, txq->pend_desc,
+ pend - &txq->pend_desc[0],
+ txq->completed, &txq->added);
+ SFC_ASSERT(rc == 0);
+
+ if (likely(pushed != txq->added))
+ efx_tx_qpush(txq->common, txq->added, pushed);
+ }
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+ if (!reap_done)
+ sfc_efx_tx_reap(txq);
+#endif
+
+done:
+ return pkts_sent;
+}
+
+struct sfc_txq *
+sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq)
+{
+ const struct sfc_dp_queue *dpq = &dp_txq->dpq;
+ struct rte_eth_dev *eth_dev;
+ struct sfc_adapter *sa;
+ struct sfc_txq *txq;
+
+ SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
+ eth_dev = &rte_eth_devices[dpq->port_id];
+
+ sa = eth_dev->data->dev_private;
+
+ SFC_ASSERT(dpq->queue_id < sa->txq_count);
+ txq = sa->txq_info[dpq->queue_id].txq;
+
+ SFC_ASSERT(txq != NULL);
+ return txq;
+}
+
+static sfc_dp_tx_qcreate_t sfc_efx_tx_qcreate;
+static int
+sfc_efx_tx_qcreate(uint16_t port_id, uint16_t queue_id,
+ const struct rte_pci_addr *pci_addr,
+ int socket_id,
+ const struct sfc_dp_tx_qcreate_info *info,
+ struct sfc_dp_txq **dp_txqp)
+{
+ struct sfc_efx_txq *txq;
+ struct sfc_txq *ctrl_txq;
+ int rc;
+
+ rc = ENOMEM;
+ txq = rte_zmalloc_socket("sfc-efx-txq", sizeof(*txq),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq == NULL)
+ goto fail_txq_alloc;
+
+ sfc_dp_queue_init(&txq->dp.dpq, port_id, queue_id, pci_addr);
+
+ rc = ENOMEM;
+ txq->pend_desc = rte_calloc_socket("sfc-efx-txq-pend-desc",
+ EFX_TXQ_LIMIT(info->txq_entries),
+ sizeof(*txq->pend_desc), 0,
+ socket_id);
+ if (txq->pend_desc == NULL)
+ goto fail_pend_desc_alloc;
+
+ rc = ENOMEM;
+ txq->sw_ring = rte_calloc_socket("sfc-efx-txq-sw_ring",
+ info->txq_entries,
+ sizeof(*txq->sw_ring),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (txq->sw_ring == NULL)
+ goto fail_sw_ring_alloc;
+
+ ctrl_txq = sfc_txq_by_dp_txq(&txq->dp);
+ if (ctrl_txq->evq->sa->tso) {
+ rc = sfc_efx_tso_alloc_tsoh_objs(txq->sw_ring,
+ info->txq_entries, socket_id);
+ if (rc != 0)
+ goto fail_alloc_tsoh_objs;
+ }
+
+ txq->evq = ctrl_txq->evq;
+ txq->ptr_mask = info->txq_entries - 1;
+ txq->free_thresh = info->free_thresh;
+ txq->dma_desc_size_max = info->dma_desc_size_max;
+
+ *dp_txqp = &txq->dp;
+ return 0;
+
+fail_alloc_tsoh_objs:
+ rte_free(txq->sw_ring);
+
+fail_sw_ring_alloc:
+ rte_free(txq->pend_desc);
+
+fail_pend_desc_alloc:
+ rte_free(txq);
+
+fail_txq_alloc:
+ return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_efx_tx_qdestroy;
+static void
+sfc_efx_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ sfc_efx_tso_free_tsoh_objs(txq->sw_ring, txq->ptr_mask + 1);
+ rte_free(txq->sw_ring);
+ rte_free(txq->pend_desc);
+ rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_efx_tx_qstart;
+static int
+sfc_efx_tx_qstart(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int evq_read_ptr,
+ unsigned int txq_desc_index)
+{
+ /* libefx-based datapath is specific to libefx-based PMD */
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ struct sfc_txq *ctrl_txq = sfc_txq_by_dp_txq(dp_txq);
+
+ txq->common = ctrl_txq->common;
+
+ txq->pending = txq->completed = txq->added = txq_desc_index;
+ txq->hw_vlan_tci = 0;
+
+ txq->flags |= (SFC_EFX_TXQ_FLAG_STARTED | SFC_EFX_TXQ_FLAG_RUNNING);
+
+ return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_efx_tx_qstop;
+static void
+sfc_efx_tx_qstop(struct sfc_dp_txq *dp_txq,
+ __rte_unused unsigned int *evq_read_ptr)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_RUNNING;
+}
+
+static sfc_dp_tx_qreap_t sfc_efx_tx_qreap;
+static void
+sfc_efx_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+ struct sfc_efx_txq *txq = sfc_efx_txq_by_dp_txq(dp_txq);
+ unsigned int txds;
+
+ sfc_efx_tx_reap(txq);
+
+ for (txds = 0; txds <= txq->ptr_mask; txds++) {
+ if (txq->sw_ring[txds].mbuf != NULL) {
+ rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+ txq->sw_ring[txds].mbuf = NULL;
+ }
+ }
+
+ txq->flags &= ~SFC_EFX_TXQ_FLAG_STARTED;
+}
+
+struct sfc_dp_tx sfc_efx_tx = {
+ .dp = {
+ .name = SFC_KVARG_DATAPATH_EFX,
+ .type = SFC_DP_TX,
+ .hw_fw_caps = 0,
+ },
+ .features = SFC_DP_TX_FEAT_VLAN_INSERT |
+ SFC_DP_TX_FEAT_TSO |
+ SFC_DP_TX_FEAT_MULTI_SEG,
+ .qcreate = sfc_efx_tx_qcreate,
+ .qdestroy = sfc_efx_tx_qdestroy,
+ .qstart = sfc_efx_tx_qstart,
+ .qstop = sfc_efx_tx_qstop,
+ .qreap = sfc_efx_tx_qreap,
+ .pkt_burst = sfc_efx_xmit_pkts,
+};
diff --git a/drivers/net/sfc/sfc_tx.h b/drivers/net/sfc/sfc_tx.h
new file mode 100644
index 00000000..6c3ac3b6
--- /dev/null
+++ b/drivers/net/sfc/sfc_tx.h
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016-2017 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SFC_TX_H
+#define _SFC_TX_H
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+
+#include "efx.h"
+
+#include "sfc_dp_tx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct sfc_adapter;
+struct sfc_evq;
+
+/**
+ * Software Tx descriptor information associated with hardware Tx
+ * descriptor.
+ */
+struct sfc_efx_tx_sw_desc {
+ struct rte_mbuf *mbuf;
+ uint8_t *tsoh; /* Buffer to store TSO header */
+};
+
+enum sfc_txq_state_bit {
+ SFC_TXQ_INITIALIZED_BIT = 0,
+#define SFC_TXQ_INITIALIZED (1 << SFC_TXQ_INITIALIZED_BIT)
+ SFC_TXQ_STARTED_BIT,
+#define SFC_TXQ_STARTED (1 << SFC_TXQ_STARTED_BIT)
+ SFC_TXQ_FLUSHING_BIT,
+#define SFC_TXQ_FLUSHING (1 << SFC_TXQ_FLUSHING_BIT)
+ SFC_TXQ_FLUSHED_BIT,
+#define SFC_TXQ_FLUSHED (1 << SFC_TXQ_FLUSHED_BIT)
+};
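+
+/*
+ * Typical lifecycle: sfc_tx_qinit() leaves the queue INITIALIZED,
+ * sfc_tx_qstart() adds STARTED, and sfc_tx_qstop() tracks the flush via
+ * FLUSHING/FLUSHED (sfc_tx_qflush_done() marks it FLUSHED) before the
+ * queue returns to INITIALIZED.
+ */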
+
+/**
+ * Transmit queue control information. Not used on datapath.
+ * Allocated on the socket specified at queue setup.
+ */
+struct sfc_txq {
+ unsigned int state;
+ unsigned int hw_index;
+ struct sfc_evq *evq;
+ efsys_mem_t mem;
+ struct sfc_dp_txq *dp;
+ efx_txq_t *common;
+ unsigned int free_thresh;
+ unsigned int flags;
+};
+
+static inline unsigned int
+sfc_txq_sw_index_by_hw_index(unsigned int hw_index)
+{
+ return hw_index;
+}
+
+static inline unsigned int
+sfc_txq_sw_index(const struct sfc_txq *txq)
+{
+ return sfc_txq_sw_index_by_hw_index(txq->hw_index);
+}
+
+struct sfc_txq *sfc_txq_by_dp_txq(const struct sfc_dp_txq *dp_txq);
+
+/**
+ * Transmit queue information used on libefx-based data path.
+ * Allocated on the socket specified at queue setup.
+ */
+struct sfc_efx_txq {
+ struct sfc_evq *evq;
+ struct sfc_efx_tx_sw_desc *sw_ring;
+ unsigned int ptr_mask;
+ efx_desc_t *pend_desc;
+ efx_txq_t *common;
+ unsigned int added;
+ unsigned int pending;
+ unsigned int completed;
+ unsigned int free_thresh;
+ uint16_t hw_vlan_tci;
+ uint16_t dma_desc_size_max;
+
+ unsigned int hw_index;
+ unsigned int flags;
+#define SFC_EFX_TXQ_FLAG_STARTED 0x1
+#define SFC_EFX_TXQ_FLAG_RUNNING 0x2
+
+ /* Datapath transmit queue anchor */
+ struct sfc_dp_txq dp;
+};
+
+static inline struct sfc_efx_txq *
+sfc_efx_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+ return container_of(dp_txq, struct sfc_efx_txq, dp);
+}
+
+struct sfc_txq_info {
+ unsigned int entries;
+ struct sfc_txq *txq;
+ boolean_t deferred_start;
+ boolean_t deferred_started;
+};
+
+int sfc_tx_configure(struct sfc_adapter *sa);
+void sfc_tx_close(struct sfc_adapter *sa);
+
+int sfc_tx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void sfc_tx_qfini(struct sfc_adapter *sa, unsigned int sw_index);
+
+void sfc_tx_qflush_done(struct sfc_txq *txq);
+int sfc_tx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
+void sfc_tx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
+int sfc_tx_start(struct sfc_adapter *sa);
+void sfc_tx_stop(struct sfc_adapter *sa);
+
+/* From 'sfc_tso.c' */
+int sfc_efx_tso_alloc_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries,
+ unsigned int socket_id);
+void sfc_efx_tso_free_tsoh_objs(struct sfc_efx_tx_sw_desc *sw_ring,
+ unsigned int txq_entries);
+int sfc_efx_tso_do(struct sfc_efx_txq *txq, unsigned int idx,
+ struct rte_mbuf **in_seg, size_t *in_off, efx_desc_t **pend,
+ unsigned int *pkt_descs, size_t *pkt_len);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_TX_H */
diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile
index 4a7b14c9..836c3b2a 100644
--- a/drivers/net/szedata2/Makefile
+++ b/drivers/net/szedata2/Makefile
@@ -54,11 +54,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c
#
SYMLINK-y-include +=
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c
index f3cd52dc..54212b71 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.c
+++ b/drivers/net/szedata2/rte_eth_szedata2.c
@@ -45,6 +45,7 @@
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_kvargs.h>
@@ -91,6 +92,7 @@ struct pmd_internals {
uint16_t max_rx_queues;
uint16_t max_tx_queues;
char sze_dev[PATH_MAX];
+ struct rte_mem_resource *pci_rsc;
};
static struct ether_addr eth_addr = {
@@ -1030,6 +1032,7 @@ eth_dev_info(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
struct pmd_internals *internals = dev->data->dev_private;
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
dev_info->if_index = 0;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
@@ -1144,8 +1147,10 @@ eth_link_update(struct rte_eth_dev *dev,
struct rte_eth_link link;
struct rte_eth_link *link_ptr = &link;
struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
switch (cgmii_link_speed(ibuf)) {
@@ -1180,11 +1185,13 @@ eth_link_update(struct rte_eth_dev *dev,
static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_OBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
volatile struct szedata2_cgmii_obuf *);
cgmii_ibuf_enable(ibuf);
@@ -1195,11 +1202,13 @@ eth_dev_set_link_up(struct rte_eth_dev *dev)
static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_OBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_OBUF_BASE_OFF,
volatile struct szedata2_cgmii_obuf *);
cgmii_ibuf_disable(ibuf);
@@ -1281,8 +1290,10 @@ eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
static void
eth_promiscuous_enable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC);
}
@@ -1290,8 +1301,10 @@ eth_promiscuous_enable(struct rte_eth_dev *dev)
static void
eth_promiscuous_disable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
}
@@ -1299,8 +1312,10 @@ eth_promiscuous_disable(struct rte_eth_dev *dev)
static void
eth_allmulticast_enable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
}
@@ -1308,8 +1323,10 @@ eth_allmulticast_enable(struct rte_eth_dev *dev)
static void
eth_allmulticast_disable(struct rte_eth_dev *dev)
{
+ struct pmd_internals *internals = (struct pmd_internals *)
+ dev->data->dev_private;
volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
- dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
+ internals->pci_rsc, SZEDATA2_CGMII_IBUF_BASE_OFF,
volatile struct szedata2_cgmii_ibuf *);
cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
}
@@ -1349,7 +1366,7 @@ static const struct eth_dev_ops ops = {
* -1 on error
*/
static int
-get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
+get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
{
DIR *dir;
struct dirent *entry;
@@ -1357,7 +1374,6 @@ get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
uint32_t tmp_index;
FILE *fd;
char pcislot_path[PATH_MAX];
- struct rte_pci_addr pcislot_addr = dev->pci_dev->addr;
uint32_t domain;
uint32_t bus;
uint32_t devid;
@@ -1392,10 +1408,10 @@ get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
if (ret != 4)
continue;
- if (pcislot_addr.domain == domain &&
- pcislot_addr.bus == bus &&
- pcislot_addr.devid == devid &&
- pcislot_addr.function == function) {
+ if (pcislot_addr->domain == domain &&
+ pcislot_addr->bus == bus &&
+ pcislot_addr->devid == devid &&
+ pcislot_addr->function == function) {
*index = tmp_index;
closedir(dir);
return 0;
@@ -1415,9 +1431,10 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
struct szedata *szedata_temp;
int ret;
uint32_t szedata2_index;
- struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_addr *pci_addr = &pci_dev->addr;
struct rte_mem_resource *pci_rsc =
- &dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
+ &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
char rsc_filename[PATH_MAX];
void *pci_resource_ptr = NULL;
int fd;
@@ -1427,7 +1444,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
pci_addr->function);
/* Get index of szedata2 device file and create path to device file */
- ret = get_szedata2_index(dev, &szedata2_index);
+ ret = get_szedata2_index(pci_addr, &szedata2_index);
if (ret != 0) {
RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
return -ENODEV;
@@ -1471,10 +1488,10 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
/* Set function callbacks for Ethernet API */
dev->dev_ops = &ops;
- rte_eth_copy_pci_info(dev, dev->pci_dev);
+ rte_eth_copy_pci_info(dev, pci_dev);
/* mmap pci resource0 file to rte_mem_resource structure */
- if (dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
+ if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
0) {
RTE_LOG(ERR, PMD, "Missing resource%u file\n",
PCI_RESOURCE_NUMBER);
@@ -1491,7 +1508,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
}
pci_resource_ptr = mmap(0,
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
if (pci_resource_ptr == NULL) {
@@ -1499,8 +1516,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
rsc_filename, fd);
return -EINVAL;
}
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr =
- pci_resource_ptr;
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
+ internals->pci_rsc = pci_rsc;
RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
"virt addr = %llx\n", PCI_RESOURCE_NUMBER,
@@ -1516,8 +1533,8 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
RTE_CACHE_LINE_SIZE);
if (data->mac_addrs == NULL) {
RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
- munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
+ munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
return -EINVAL;
}
@@ -1537,12 +1554,13 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
- struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+ struct rte_pci_addr *pci_addr = &pci_dev->addr;
rte_free(dev->data->mac_addrs);
dev->data->mac_addrs = NULL;
- munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
+ munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
+ pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
RTE_LOG(INFO, PMD, "szedata2 device ("
PCI_PRI_FMT ") successfully uninitialized\n",
@@ -1570,16 +1588,26 @@ static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
}
};
-static struct eth_driver szedata2_eth_driver = {
- .pci_drv = {
- .id_table = rte_szedata2_pci_id_table,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = rte_szedata2_eth_dev_init,
- .eth_dev_uninit = rte_szedata2_eth_dev_uninit,
- .dev_private_size = sizeof(struct pmd_internals),
+static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
+}
+
+static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev,
+ rte_szedata2_eth_dev_uninit);
+}
+
+static struct rte_pci_driver szedata2_eth_driver = {
+ .id_table = rte_szedata2_pci_id_table,
+ .probe = szedata2_eth_pci_probe,
+ .remove = szedata2_eth_pci_remove,
};
-RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver.pci_drv);
+RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
+RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
+ "* combo6core & combov3 & szedata2 & szedata2_cv3");
diff --git a/drivers/net/szedata2/rte_eth_szedata2.h b/drivers/net/szedata2/rte_eth_szedata2.h
index 522cf47f..afe8a383 100644
--- a/drivers/net/szedata2/rte_eth_szedata2.h
+++ b/drivers/net/szedata2/rte_eth_szedata2.h
@@ -117,94 +117,82 @@ struct szedata {
* @return Byte from PCI resource at offset "offset".
*/
static inline uint8_t
-pci_resource_read8(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read8(struct rte_mem_resource *rsc, uint32_t offset)
{
- return *((uint8_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset));
+ return *((uint8_t *)((uint8_t *)rsc->addr + offset));
}
/*
* @return Two bytes from PCI resource starting at offset "offset".
*/
static inline uint16_t
-pci_resource_read16(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read16(struct rte_mem_resource *rsc, uint32_t offset)
{
- return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)));
+ return rte_le_to_cpu_16(*((uint16_t *)((uint8_t *)rsc->addr +
+ offset)));
}
/*
* @return Four bytes from PCI resource starting at offset "offset".
*/
static inline uint32_t
-pci_resource_read32(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read32(struct rte_mem_resource *rsc, uint32_t offset)
{
- return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)));
+ return rte_le_to_cpu_32(*((uint32_t *)((uint8_t *)rsc->addr +
+ offset)));
}
/*
* @return Eight bytes from PCI resource starting at offset "offset".
*/
static inline uint64_t
-pci_resource_read64(struct rte_eth_dev *dev, uint32_t offset)
+pci_resource_read64(struct rte_mem_resource *rsc, uint32_t offset)
{
- return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)));
+ return rte_le_to_cpu_64(*((uint64_t *)((uint8_t *)rsc->addr +
+ offset)));
}
/*
* Write one byte to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write8(struct rte_eth_dev *dev, uint32_t offset, uint8_t val)
+pci_resource_write8(struct rte_mem_resource *rsc, uint32_t offset, uint8_t val)
{
- *((uint8_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = val;
+ *((uint8_t *)((uint8_t *)rsc->addr + offset)) = val;
}
/*
* Write two bytes to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write16(struct rte_eth_dev *dev, uint32_t offset, uint16_t val)
+pci_resource_write16(struct rte_mem_resource *rsc, uint32_t offset,
+ uint16_t val)
{
- *((uint16_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = rte_cpu_to_le_16(val);
+ *((uint16_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_16(val);
}
/*
* Write four bytes to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write32(struct rte_eth_dev *dev, uint32_t offset, uint32_t val)
+pci_resource_write32(struct rte_mem_resource *rsc, uint32_t offset,
+ uint32_t val)
{
- *((uint32_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = rte_cpu_to_le_32(val);
+ *((uint32_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_32(val);
}
/*
* Write eight bytes to PCI resource address space at offset "offset".
*/
static inline void
-pci_resource_write64(struct rte_eth_dev *dev, uint32_t offset, uint64_t val)
+pci_resource_write64(struct rte_mem_resource *rsc, uint32_t offset,
+ uint64_t val)
{
- *((uint64_t *)((uint8_t *)
- dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr +
- offset)) = rte_cpu_to_le_64(val);
+ *((uint64_t *)((uint8_t *)rsc->addr + offset)) = rte_cpu_to_le_64(val);
}
-#define SZEDATA2_PCI_RESOURCE_PTR(dev, offset, type) \
- ((type)((uint8_t *) \
- ((dev)->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr) \
- + (offset)))
+#define SZEDATA2_PCI_RESOURCE_PTR(rsc, offset, type) \
+ ((type)(((uint8_t *)(rsc)->addr) + (offset)))
enum szedata2_link_speed {
SZEDATA2_LINK_SPEED_DEFAULT = 0,
diff --git a/drivers/net/tap/Makefile b/drivers/net/tap/Makefile
new file mode 100644
index 00000000..b0de0284
--- /dev/null
+++ b/drivers/net/tap/Makefile
@@ -0,0 +1,93 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_tap.a
+
+EXPORT_MAP := rte_pmd_tap_version.map
+
+LIBABIVER := 1
+
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I.
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += rte_eth_tap.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_netlink.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP) += tap_tcmsgs.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
+# Generate and clean-up tap_autoconf.h.
+
+export CC CFLAGS CPPFLAGS EXTRA_CFLAGS EXTRA_CPPFLAGS
+export AUTO_CONFIG_CFLAGS = -Wno-error
+
+ifndef V
+AUTOCONF_OUTPUT := >/dev/null
+endif
+
+tap_autoconf.h.new: FORCE
+
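+# Each auto-config-h.sh invocation below probes the named kernel header for
+# the given symbol and, when it is found, defines the corresponding HAVE_*
+# macro in tap_autoconf.h.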
+tap_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
+ $Q $(RM) -f -- '$@'
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_FLOWER \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_UNSPEC \
+ $(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_TC_VLAN_ID \
+ linux/pkt_cls.h \
+ enum TCA_FLOWER_KEY_VLAN_PRIO \
+ $(AUTOCONF_OUTPUT)
+
+# Create tap_autoconf.h or update it in case it differs from the new one.
+
+tap_autoconf.h: tap_autoconf.h.new
+ $Q [ -f '$@' ] && \
+ cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \
+ mv '$<' '$@'
+
+$(SRCS-$(CONFIG_RTE_LIBRTE_PMD_TAP):.c=.o): tap_autoconf.h
+
+clean_tap: FORCE
+ $Q rm -f -- tap_autoconf.h tap_autoconf.h.new
+
+clean: clean_tap
diff --git a/drivers/net/tap/rte_eth_tap.c b/drivers/net/tap/rte_eth_tap.c
new file mode 100644
index 00000000..e44de027
--- /dev/null
+++ b/drivers/net/tap/rte_eth_tap.c
@@ -0,0 +1,1394 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
+#include <rte_malloc.h>
+#include <rte_vdev.h>
+#include <rte_kvargs.h>
+#include <rte_net.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <sys/utsname.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdint.h>
+#include <sys/uio.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <linux/if_tun.h>
+#include <linux/if_ether.h>
+#include <linux/version.h>
+#include <fcntl.h>
+
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_netlink.h>
+#include <tap_tcmsgs.h>
+
+/* Linux based path to the TUN device */
+#define TUN_TAP_DEV_PATH "/dev/net/tun"
+#define DEFAULT_TAP_NAME "dtap"
+
+#define ETH_TAP_IFACE_ARG "iface"
+#define ETH_TAP_SPEED_ARG "speed"
+#define ETH_TAP_REMOTE_ARG "remote"
+
+#define FLOWER_KERNEL_VERSION KERNEL_VERSION(4, 2, 0)
+#define FLOWER_VLAN_KERNEL_VERSION KERNEL_VERSION(4, 9, 0)
+
+static struct rte_vdev_driver pmd_tap_drv;
+
+static const char *valid_arguments[] = {
+ ETH_TAP_IFACE_ARG,
+ ETH_TAP_SPEED_ARG,
+ ETH_TAP_REMOTE_ARG,
+ NULL
+};
+
+static int tap_unit;
+
+static volatile uint32_t tap_trigger; /* Rx trigger */
+
+static struct rte_eth_link pmd_link = {
+ .link_speed = ETH_SPEED_NUM_10G,
+ .link_duplex = ETH_LINK_FULL_DUPLEX,
+ .link_status = ETH_LINK_DOWN,
+ .link_autoneg = ETH_LINK_SPEED_AUTONEG
+};
+
+static void
+tap_trigger_cb(int sig __rte_unused)
+{
+ /* Valid trigger values are nonzero */
+ tap_trigger = (tap_trigger + 1) | 0x80000000;
+}
+
+/* Specifies which netdevices the ioctl should be applied to */
+enum ioctl_mode {
+ LOCAL_AND_REMOTE,
+ LOCAL_ONLY,
+ REMOTE_ONLY,
+};
+
+static int
+tap_ioctl(struct pmd_internals *pmd, unsigned long request,
+ struct ifreq *ifr, int set, enum ioctl_mode mode);
+
+static int tap_intr_handle_set(struct rte_eth_dev *dev, int set);
+
+/* Tun/Tap allocation routine
+ *
+ * The tap netdevice name is taken from pmd->name and configured on the
+ * opened /dev/net/tun file descriptor via the TUNSETIFF ioctl.
+ */
+static int
+tun_alloc(struct pmd_internals *pmd, uint16_t qid)
+{
+ struct ifreq ifr;
+#ifdef IFF_MULTI_QUEUE
+ unsigned int features;
+#endif
+ int fd;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+
+ /*
+	 * Do not set IFF_NO_PI as the packet information header is needed
+	 * to check whether a received packet has been truncated.
+ */
+ ifr.ifr_flags = IFF_TAP;
+ snprintf(ifr.ifr_name, IFNAMSIZ, "%s", pmd->name);
+
+ RTE_LOG(DEBUG, PMD, "ifr_name '%s'\n", ifr.ifr_name);
+
+ fd = open(TUN_TAP_DEV_PATH, O_RDWR);
+ if (fd < 0) {
+		RTE_LOG(ERR, PMD, "Unable to create TAP interface\n");
+ goto error;
+ }
+
+#ifdef IFF_MULTI_QUEUE
+ /* Grab the TUN features to verify we can work multi-queue */
+ if (ioctl(fd, TUNGETFEATURES, &features) < 0) {
+ RTE_LOG(ERR, PMD, "TAP unable to get TUN/TAP features\n");
+ goto error;
+ }
+ RTE_LOG(DEBUG, PMD, " TAP Features %08x\n", features);
+
+ if (features & IFF_MULTI_QUEUE) {
+ RTE_LOG(DEBUG, PMD, " Multi-queue support for %d queues\n",
+ RTE_PMD_TAP_MAX_QUEUES);
+ ifr.ifr_flags |= IFF_MULTI_QUEUE;
+ } else
+#endif
+ {
+ ifr.ifr_flags |= IFF_ONE_QUEUE;
+ RTE_LOG(DEBUG, PMD, " Single queue only support\n");
+ }
+
+ /* Set the TUN/TAP configuration and set the name if needed */
+ if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
+ RTE_LOG(WARNING, PMD,
+ "Unable to set TUNSETIFF for %s\n",
+ ifr.ifr_name);
+ perror("TUNSETIFF");
+ goto error;
+ }
+
+ /* Always set the file descriptor to non-blocking */
+ if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0) {
+ RTE_LOG(WARNING, PMD,
+ "Unable to set %s to nonblocking\n",
+ ifr.ifr_name);
+ perror("F_SETFL, NONBLOCK");
+ goto error;
+ }
+
+ /* Set up trigger to optimize empty Rx bursts */
+ errno = 0;
+ do {
+ struct sigaction sa;
+ int flags = fcntl(fd, F_GETFL);
+
+ if (flags == -1 || sigaction(SIGIO, NULL, &sa) == -1)
+ break;
+ if (sa.sa_handler != tap_trigger_cb) {
+ /*
+ * Make sure SIGIO is not already taken. This is done
+ * as late as possible to leave the application a
+ * chance to set up its own signal handler first.
+ */
+ if (sa.sa_handler != SIG_IGN &&
+ sa.sa_handler != SIG_DFL) {
+ errno = EBUSY;
+ break;
+ }
+ sa = (struct sigaction){
+ .sa_flags = SA_RESTART,
+ .sa_handler = tap_trigger_cb,
+ };
+ if (sigaction(SIGIO, &sa, NULL) == -1)
+ break;
+ }
+ /* Enable SIGIO on file descriptor */
+ fcntl(fd, F_SETFL, flags | O_ASYNC);
+ fcntl(fd, F_SETOWN, getpid());
+ } while (0);
+ if (errno) {
+ /* Disable trigger globally in case of error */
+ tap_trigger = 0;
+ RTE_LOG(WARNING, PMD, "Rx trigger disabled: %s\n",
+ strerror(errno));
+ }
+
+ if (qid == 0) {
+ struct ifreq ifr;
+
+ /*
+ * pmd->eth_addr contains the desired MAC, either from remote
+ * or from a random assignment. Sync it with the tap netdevice.
+ */
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, &pmd->eth_addr,
+ ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 0, LOCAL_ONLY) < 0)
+ goto error;
+
+ pmd->if_index = if_nametoindex(pmd->name);
+ if (!pmd->if_index) {
+ RTE_LOG(ERR, PMD,
+ "Could not find ifindex for %s: rte_flow won't be usable.\n",
+ pmd->name);
+ return fd;
+ }
+ if (!pmd->flower_support)
+ return fd;
+ if (qdisc_create_multiq(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD,
+ "Could not create multiq qdisc for %s: rte_flow won't be usable.\n",
+ pmd->name);
+ return fd;
+ }
+ if (qdisc_create_ingress(pmd->nlsk_fd, pmd->if_index) < 0) {
+ RTE_LOG(ERR, PMD,
+				"Could not create ingress qdisc for %s: rte_flow won't be usable.\n",
+ pmd->name);
+ return fd;
+ }
+ if (pmd->remote_if_index) {
+ /*
+			 * Flush usually returns a negative value because it
+			 * tries to delete every qdisc (and on a running
+			 * device at least one qdisc is always present).
+			 * Ignore the negative return value.
+ */
+ qdisc_flush(pmd->nlsk_fd, pmd->remote_if_index);
+ if (qdisc_create_ingress(pmd->nlsk_fd,
+ pmd->remote_if_index) < 0)
+ goto remote_fail;
+ LIST_INIT(&pmd->implicit_flows);
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ goto remote_fail;
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_BROADCAST) < 0)
+ goto remote_fail;
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_BROADCASTV6) < 0)
+ goto remote_fail;
+ if (tap_flow_implicit_create(
+ pmd, TAP_REMOTE_TX) < 0)
+ goto remote_fail;
+ }
+ }
+
+ return fd;
+
+remote_fail:
+ RTE_LOG(ERR, PMD,
+ "Could not set up remote flow rules for %s: remote disabled.\n",
+ pmd->name);
+ pmd->remote_if_index = 0;
+ tap_flow_implicit_flush(pmd, NULL);
+ return fd;
+
+error:
+ if (fd > 0)
+ close(fd);
+ return -1;
+}
+
+/* Callback to handle the rx burst of packets to the correct interface and
+ * file descriptor(s) in a multi-queue setup.
+ */
+static uint16_t
+pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct rx_queue *rxq = queue;
+ uint16_t num_rx;
+ unsigned long num_rx_bytes = 0;
+ uint32_t trigger = tap_trigger;
+
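+	/*
+	 * When the SIGIO trigger is active, skip the receive path entirely
+	 * if no new data has been signalled since the last burst on this
+	 * queue; this avoids useless readv() calls on empty polls.
+	 */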
+ if (trigger == rxq->trigger_seen)
+ return 0;
+ if (trigger)
+ rxq->trigger_seen = trigger;
+ rte_compiler_barrier();
+ for (num_rx = 0; num_rx < nb_pkts; ) {
+ struct rte_mbuf *mbuf = rxq->pool;
+ struct rte_mbuf *seg = NULL;
+ struct rte_mbuf *new_tail = NULL;
+ uint16_t data_off = rte_pktmbuf_headroom(mbuf);
+ int len;
+
+ len = readv(rxq->fd, *rxq->iovecs,
+ 1 + (rxq->rxmode->enable_scatter ?
+ rxq->nb_rx_desc : 1));
+ if (len < (int)sizeof(struct tun_pi))
+ break;
+
+		/* Packet was truncated: it did not fit in the provided mbufs */
+ if (unlikely(rxq->pi.flags & TUN_PKT_STRIP)) {
+ rxq->stats.ierrors++;
+ continue;
+ }
+
+ len -= sizeof(struct tun_pi);
+
+ mbuf->pkt_len = len;
+ mbuf->port = rxq->in_port;
+ while (1) {
+ struct rte_mbuf *buf = rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(!buf)) {
+ rxq->stats.rx_nombuf++;
+ /* No new buf has been allocated: do nothing */
+ if (!new_tail || !seg)
+ goto end;
+
+ seg->next = NULL;
+ rte_pktmbuf_free(mbuf);
+
+ goto end;
+ }
+ seg = seg ? seg->next : mbuf;
+ if (rxq->pool == mbuf)
+ rxq->pool = buf;
+ if (new_tail)
+ new_tail->next = buf;
+ new_tail = buf;
+ new_tail->next = seg->next;
+
+ /* iovecs[0] is reserved for packet info (pi) */
+ (*rxq->iovecs)[mbuf->nb_segs].iov_len =
+ buf->buf_len - data_off;
+ (*rxq->iovecs)[mbuf->nb_segs].iov_base =
+ (char *)buf->buf_addr + data_off;
+
+ seg->data_len = RTE_MIN(seg->buf_len - data_off, len);
+ seg->data_off = data_off;
+
+ len -= seg->data_len;
+ if (len <= 0)
+ break;
+ mbuf->nb_segs++;
+ /* First segment has headroom, not the others */
+ data_off = 0;
+ }
+ seg->next = NULL;
+ mbuf->packet_type = rte_net_get_ptype(mbuf, NULL,
+ RTE_PTYPE_ALL_MASK);
+
+ /* account for the receive frame */
+ bufs[num_rx++] = mbuf;
+ num_rx_bytes += mbuf->pkt_len;
+ }
+end:
+ rxq->stats.ipackets += num_rx;
+ rxq->stats.ibytes += num_rx_bytes;
+
+ return num_rx;
+}
+
+/* Callback to handle sending a burst of packets through the tap interface.
+ */
+static uint16_t
+pmd_tx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ struct tx_queue *txq = queue;
+ uint16_t num_tx = 0;
+ unsigned long num_tx_bytes = 0;
+ uint32_t max_size;
+ int i;
+
+ if (unlikely(nb_pkts == 0))
+ return 0;
+
+ max_size = *txq->mtu + (ETHER_HDR_LEN + ETHER_CRC_LEN + 4);
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = bufs[num_tx];
+ struct iovec iovecs[mbuf->nb_segs + 1];
+ struct tun_pi pi = { .flags = 0 };
+ struct rte_mbuf *seg = mbuf;
+ int n;
+ int j;
+
+ /* stats.errs will be incremented */
+ if (rte_pktmbuf_pkt_len(mbuf) > max_size)
+ break;
+
+ iovecs[0].iov_base = &pi;
+ iovecs[0].iov_len = sizeof(pi);
+ for (j = 1; j <= mbuf->nb_segs; j++) {
+ iovecs[j].iov_len = rte_pktmbuf_data_len(seg);
+ iovecs[j].iov_base =
+ rte_pktmbuf_mtod(seg, void *);
+ seg = seg->next;
+ }
+ /* copy the tx frame data */
+ n = writev(txq->fd, iovecs, mbuf->nb_segs + 1);
+ if (n <= 0)
+ break;
+
+ num_tx++;
+ num_tx_bytes += mbuf->pkt_len;
+ rte_pktmbuf_free(mbuf);
+ }
+
+ txq->stats.opackets += num_tx;
+ txq->stats.errs += nb_pkts - num_tx;
+ txq->stats.obytes += num_tx_bytes;
+
+ return num_tx;
+}
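/*
 * Illustrative sketch (not part of the patch): pmd_rx_burst() and
 * pmd_tx_burst() above are wired into dev->rx_pkt_burst/tx_pkt_burst further
 * down, so an application reaches them through the generic ethdev burst API.
 * A minimal forwarding loop over a single queue could look roughly like the
 * following; the port_id value and burst size are hypothetical.
 */
#if 0
	struct rte_mbuf *pkts[32];
	uint8_t port_id = 0; /* hypothetical port */
	uint16_t nb_rx, nb_tx;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
		if (!nb_rx)
			continue;
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts, nb_rx);
		/* free any packets the TAP fd could not absorb */
		while (nb_tx < nb_rx)
			rte_pktmbuf_free(pkts[nb_tx++]);
	}
#endif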
+
+static int
+tap_ioctl(struct pmd_internals *pmd, unsigned long request,
+ struct ifreq *ifr, int set, enum ioctl_mode mode)
+{
+ short req_flags = ifr->ifr_flags;
+ int remote = pmd->remote_if_index &&
+ (mode == REMOTE_ONLY || mode == LOCAL_AND_REMOTE);
+
+ if (!pmd->remote_if_index && mode == REMOTE_ONLY)
+ return 0;
+ /*
+ * If there is a remote netdevice, apply ioctl on it, then apply it on
+ * the tap netdevice.
+ */
+apply:
+ if (remote)
+ snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->remote_iface);
+ else if (mode == LOCAL_ONLY || mode == LOCAL_AND_REMOTE)
+ snprintf(ifr->ifr_name, IFNAMSIZ, "%s", pmd->name);
+ switch (request) {
+ case SIOCSIFFLAGS:
+ /* fetch current flags to leave other flags untouched */
+ if (ioctl(pmd->ioctl_sock, SIOCGIFFLAGS, ifr) < 0)
+ goto error;
+ if (set)
+ ifr->ifr_flags |= req_flags;
+ else
+ ifr->ifr_flags &= ~req_flags;
+ break;
+ case SIOCGIFFLAGS:
+ case SIOCGIFHWADDR:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMTU:
+ break;
+ default:
+ RTE_LOG(WARNING, PMD, "%s: ioctl() called with wrong arg\n",
+ pmd->name);
+ return -EINVAL;
+ }
+ if (ioctl(pmd->ioctl_sock, request, ifr) < 0)
+ goto error;
+ if (remote-- && mode == LOCAL_AND_REMOTE)
+ goto apply;
+ return 0;
+
+error:
+ RTE_LOG(ERR, PMD, "%s: ioctl(%lu) failed with error: %s\n",
+ ifr->ifr_name, request, strerror(errno));
+ return -errno;
+}
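/*
 * Worked example (illustrative): a caller that wants to clear IFF_UP passes
 * an ifreq with ifr_flags = IFF_UP and set = 0, as tap_link_set_down() does
 * below. tap_ioctl() first fetches the current flags with SIOCGIFFLAGS, then
 * clears only the requested bits before issuing SIOCSIFFLAGS, so unrelated
 * flags (e.g. IFF_PROMISC) are left untouched. With LOCAL_AND_REMOTE the same
 * sequence is applied to the remote netdevice first, then to the tap.
 */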
+
+static int
+tap_link_set_down(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_UP };
+
+ dev->data->dev_link.link_status = ETH_LINK_DOWN;
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+}
+
+static int
+tap_link_set_up(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_UP };
+
+ dev->data->dev_link.link_status = ETH_LINK_UP;
+ return tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+}
+
+static int
+tap_dev_start(struct rte_eth_dev *dev)
+{
+ int err;
+
+ err = tap_intr_handle_set(dev, 1);
+ if (err)
+ return err;
+ return tap_link_set_up(dev);
+}
+
+/* This function gets called when the current port gets stopped.
+ */
+static void
+tap_dev_stop(struct rte_eth_dev *dev)
+{
+ tap_intr_handle_set(dev, 0);
+ tap_link_set_down(dev);
+}
+
+static int
+tap_dev_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static uint32_t
+tap_dev_speed_capa(void)
+{
+ uint32_t speed = pmd_link.link_speed;
+ uint32_t capa = 0;
+
+ if (speed >= ETH_SPEED_NUM_10M)
+ capa |= ETH_LINK_SPEED_10M;
+ if (speed >= ETH_SPEED_NUM_100M)
+ capa |= ETH_LINK_SPEED_100M;
+ if (speed >= ETH_SPEED_NUM_1G)
+ capa |= ETH_LINK_SPEED_1G;
+ if (speed >= ETH_SPEED_NUM_2_5G)
+ capa |= ETH_LINK_SPEED_2_5G;
+ if (speed >= ETH_SPEED_NUM_5G)
+ capa |= ETH_LINK_SPEED_5G;
+ if (speed >= ETH_SPEED_NUM_10G)
+ capa |= ETH_LINK_SPEED_10G;
+ if (speed >= ETH_SPEED_NUM_20G)
+ capa |= ETH_LINK_SPEED_20G;
+ if (speed >= ETH_SPEED_NUM_25G)
+ capa |= ETH_LINK_SPEED_25G;
+ if (speed >= ETH_SPEED_NUM_40G)
+ capa |= ETH_LINK_SPEED_40G;
+ if (speed >= ETH_SPEED_NUM_50G)
+ capa |= ETH_LINK_SPEED_50G;
+ if (speed >= ETH_SPEED_NUM_56G)
+ capa |= ETH_LINK_SPEED_56G;
+ if (speed >= ETH_SPEED_NUM_100G)
+ capa |= ETH_LINK_SPEED_100G;
+
+ return capa;
+}
+
+static void
+tap_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->if_index;
+ dev_info->max_mac_addrs = 1;
+ dev_info->max_rx_pktlen = (uint32_t)ETHER_MAX_VLAN_FRAME_LEN;
+ dev_info->max_rx_queues = internals->nb_queues;
+ dev_info->max_tx_queues = internals->nb_queues;
+ dev_info->min_rx_bufsize = 0;
+ dev_info->pci_dev = NULL;
+ dev_info->speed_capa = tap_dev_speed_capa();
+}
+
+static void
+tap_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *tap_stats)
+{
+ unsigned int i, imax;
+ unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
+ unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
+ unsigned long rx_nombuf = 0, ierrors = 0;
+ const struct pmd_internals *pmd = dev->data->dev_private;
+
+ imax = (pmd->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS) ?
+ pmd->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS;
+
+ for (i = 0; i < imax; i++) {
+ tap_stats->q_ipackets[i] = pmd->rxq[i].stats.ipackets;
+ tap_stats->q_ibytes[i] = pmd->rxq[i].stats.ibytes;
+ rx_total += tap_stats->q_ipackets[i];
+ rx_bytes_total += tap_stats->q_ibytes[i];
+ rx_nombuf += pmd->rxq[i].stats.rx_nombuf;
+ ierrors += pmd->rxq[i].stats.ierrors;
+
+ tap_stats->q_opackets[i] = pmd->txq[i].stats.opackets;
+ tap_stats->q_errors[i] = pmd->txq[i].stats.errs;
+ tap_stats->q_obytes[i] = pmd->txq[i].stats.obytes;
+ tx_total += tap_stats->q_opackets[i];
+ tx_err_total += tap_stats->q_errors[i];
+ tx_bytes_total += tap_stats->q_obytes[i];
+ }
+
+ tap_stats->ipackets = rx_total;
+ tap_stats->ibytes = rx_bytes_total;
+ tap_stats->ierrors = ierrors;
+ tap_stats->rx_nombuf = rx_nombuf;
+ tap_stats->opackets = tx_total;
+ tap_stats->oerrors = tx_err_total;
+ tap_stats->obytes = tx_bytes_total;
+}
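/*
 * Illustrative sketch (not part of the patch): the per-queue counters
 * aggregated above are exposed through the generic ethdev stats API, e.g.:
 */
#if 0
	struct rte_eth_stats stats;
	uint8_t port_id = 0; /* hypothetical port */

	rte_eth_stats_get(port_id, &stats);
	printf("rx=%llu tx=%llu rx_nombuf=%llu\n",
	       (unsigned long long)stats.ipackets,
	       (unsigned long long)stats.opackets,
	       (unsigned long long)stats.rx_nombuf);
#endif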
+
+static void
+tap_stats_reset(struct rte_eth_dev *dev)
+{
+ int i;
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ for (i = 0; i < pmd->nb_queues; i++) {
+ pmd->rxq[i].stats.ipackets = 0;
+ pmd->rxq[i].stats.ibytes = 0;
+ pmd->rxq[i].stats.ierrors = 0;
+ pmd->rxq[i].stats.rx_nombuf = 0;
+
+ pmd->txq[i].stats.opackets = 0;
+ pmd->txq[i].stats.errs = 0;
+ pmd->txq[i].stats.obytes = 0;
+ }
+}
+
+static void
+tap_dev_close(struct rte_eth_dev *dev __rte_unused)
+{
+ int i;
+ struct pmd_internals *internals = dev->data->dev_private;
+
+ tap_link_set_down(dev);
+ tap_flow_flush(dev, NULL);
+ tap_flow_implicit_flush(internals, NULL);
+
+ for (i = 0; i < internals->nb_queues; i++) {
+ if (internals->rxq[i].fd != -1)
+ close(internals->rxq[i].fd);
+ internals->rxq[i].fd = -1;
+ internals->txq[i].fd = -1;
+ }
+}
+
+static void
+tap_rx_queue_release(void *queue)
+{
+ struct rx_queue *rxq = queue;
+
+ if (rxq && (rxq->fd > 0)) {
+ close(rxq->fd);
+ rxq->fd = -1;
+ rte_pktmbuf_free(rxq->pool);
+ rte_free(rxq->iovecs);
+ rxq->pool = NULL;
+ rxq->iovecs = NULL;
+ }
+}
+
+static void
+tap_tx_queue_release(void *queue)
+{
+ struct tx_queue *txq = queue;
+
+ if (txq && (txq->fd > 0)) {
+ close(txq->fd);
+ txq->fd = -1;
+ }
+}
+
+static int
+tap_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+ struct rte_eth_link *dev_link = &dev->data->dev_link;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = 0 };
+
+ if (pmd->remote_if_index) {
+ tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, REMOTE_ONLY);
+ if (!(ifr.ifr_flags & IFF_UP) ||
+ !(ifr.ifr_flags & IFF_RUNNING)) {
+ dev_link->link_status = ETH_LINK_DOWN;
+ return 0;
+ }
+ }
+ tap_ioctl(pmd, SIOCGIFFLAGS, &ifr, 0, LOCAL_ONLY);
+ dev_link->link_status =
+ ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING) ?
+ ETH_LINK_UP :
+ ETH_LINK_DOWN);
+ return 0;
+}
+
+static void
+tap_promisc_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+
+ dev->data->promiscuous = 1;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC);
+}
+
+static void
+tap_promisc_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_PROMISC };
+
+ dev->data->promiscuous = 0;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_destroy(pmd, TAP_REMOTE_PROMISC);
+}
+
+static void
+tap_allmulti_enable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+
+ dev->data->all_multicast = 1;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 1, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI);
+}
+
+static void
+tap_allmulti_disable(struct rte_eth_dev *dev)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_flags = IFF_ALLMULTI };
+
+ dev->data->all_multicast = 0;
+ tap_ioctl(pmd, SIOCSIFFLAGS, &ifr, 0, LOCAL_AND_REMOTE);
+ if (pmd->remote_if_index)
+ tap_flow_implicit_destroy(pmd, TAP_REMOTE_ALLMULTI);
+}
+
+
+static void
+tap_mac_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr;
+
+ if (is_zero_ether_addr(mac_addr)) {
+ RTE_LOG(ERR, PMD, "%s: can't set an empty MAC address\n",
+ dev->data->name);
+ return;
+ }
+ /* Check the actual current MAC address on the tap netdevice */
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, LOCAL_ONLY) != 0) {
+ RTE_LOG(ERR, PMD,
+ "%s: couldn't check current tap MAC address\n",
+ dev->data->name);
+ return;
+ }
+ if (is_same_ether_addr((struct ether_addr *)&ifr.ifr_hwaddr.sa_data,
+ mac_addr))
+ return;
+
+ ifr.ifr_hwaddr.sa_family = AF_LOCAL;
+ rte_memcpy(ifr.ifr_hwaddr.sa_data, mac_addr, ETHER_ADDR_LEN);
+ if (tap_ioctl(pmd, SIOCSIFHWADDR, &ifr, 1, LOCAL_AND_REMOTE) < 0)
+ return;
+ rte_memcpy(&pmd->eth_addr, mac_addr, ETHER_ADDR_LEN);
+ if (pmd->remote_if_index) {
+ /* Replace MAC redirection rule after a MAC change */
+ if (tap_flow_implicit_destroy(pmd, TAP_REMOTE_LOCAL_MAC) < 0) {
+ RTE_LOG(ERR, PMD,
+ "%s: Couldn't delete MAC redirection rule\n",
+ dev->data->name);
+ return;
+ }
+ if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
+ RTE_LOG(ERR, PMD,
+ "%s: Couldn't add MAC redirection rule\n",
+ dev->data->name);
+ }
+}
+
+static int
+tap_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rx_queue *rx = &internals->rxq[qid];
+ struct tx_queue *tx = &internals->txq[qid];
+ int fd;
+
+ fd = rx->fd;
+ if (fd < 0) {
+ fd = tx->fd;
+ if (fd < 0) {
+ RTE_LOG(INFO, PMD, "Add queue to TAP %s for qid %d\n",
+ pmd->name, qid);
+ fd = tun_alloc(pmd, qid);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD, "tun_alloc(%s, %d) failed\n",
+ pmd->name, qid);
+ return -1;
+ }
+ if (qid == 0) {
+ struct ifreq ifr;
+
+ ifr.ifr_mtu = dev->data->mtu;
+ if (tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1,
+ LOCAL_AND_REMOTE) < 0) {
+ close(fd);
+ return -1;
+ }
+ }
+ }
+ }
+
+ rx->fd = fd;
+ tx->fd = fd;
+ tx->mtu = &dev->data->mtu;
+ rx->rxmode = &dev->data->dev_conf.rxmode;
+
+ return fd;
+}
+
+static int
+rx_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid)
+{
+ dev->data->rx_queues[qid] = &internals->rxq[qid];
+
+ return tap_setup_queue(dev, internals, qid);
+}
+
+static int
+tx_setup_queue(struct rte_eth_dev *dev,
+ struct pmd_internals *internals,
+ uint16_t qid)
+{
+ dev->data->tx_queues[qid] = &internals->txq[qid];
+
+ return tap_setup_queue(dev, internals, qid);
+}
+
+static int
+tap_rx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id,
+ uint16_t nb_rx_desc,
+ unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf __rte_unused,
+ struct rte_mempool *mp)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ struct rx_queue *rxq = &internals->rxq[rx_queue_id];
+ struct rte_mbuf **tmp = &rxq->pool;
+ long iov_max = sysconf(_SC_IOV_MAX);
+ uint16_t nb_desc = RTE_MIN(nb_rx_desc, iov_max - 1);
+ struct iovec (*iovecs)[nb_desc + 1];
+ int data_off = RTE_PKTMBUF_HEADROOM;
+ int ret = 0;
+ int fd;
+ int i;
+
+ if ((rx_queue_id >= internals->nb_queues) || !mp) {
+ RTE_LOG(WARNING, PMD,
+ "nb_queues %d too small or mempool NULL\n",
+ internals->nb_queues);
+ return -1;
+ }
+
+ rxq->mp = mp;
+ rxq->trigger_seen = 1; /* force initial burst */
+ rxq->in_port = dev->data->port_id;
+ rxq->nb_rx_desc = nb_desc;
+ iovecs = rte_zmalloc_socket(dev->data->name, sizeof(*iovecs), 0,
+ socket_id);
+ if (!iovecs) {
+ RTE_LOG(WARNING, PMD,
+ "%s: Couldn't allocate %d RX descriptors\n",
+ dev->data->name, nb_desc);
+ return -ENOMEM;
+ }
+ rxq->iovecs = iovecs;
+
+ fd = rx_setup_queue(dev, internals, rx_queue_id);
+ if (fd == -1) {
+ ret = fd;
+ goto error;
+ }
+
+ (*rxq->iovecs)[0].iov_len = sizeof(struct tun_pi);
+ (*rxq->iovecs)[0].iov_base = &rxq->pi;
+
+ for (i = 1; i <= nb_desc; i++) {
+ *tmp = rte_pktmbuf_alloc(rxq->mp);
+ if (!*tmp) {
+ RTE_LOG(WARNING, PMD,
+ "%s: couldn't allocate memory for queue %d\n",
+ dev->data->name, rx_queue_id);
+ ret = -ENOMEM;
+ goto error;
+ }
+ (*rxq->iovecs)[i].iov_len = (*tmp)->buf_len - data_off;
+ (*rxq->iovecs)[i].iov_base =
+ (char *)(*tmp)->buf_addr + data_off;
+ data_off = 0;
+ tmp = &(*tmp)->next;
+ }
+
+ RTE_LOG(DEBUG, PMD, " RX TAP device name %s, qid %d on fd %d\n",
+ internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
+
+ return 0;
+
+error:
+ rte_pktmbuf_free(rxq->pool);
+ rxq->pool = NULL;
+ rte_free(rxq->iovecs);
+ rxq->iovecs = NULL;
+ return ret;
+}
+
+static int
+tap_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id,
+ uint16_t nb_tx_desc __rte_unused,
+ unsigned int socket_id __rte_unused,
+ const struct rte_eth_txconf *tx_conf __rte_unused)
+{
+ struct pmd_internals *internals = dev->data->dev_private;
+ int ret;
+
+ if (tx_queue_id >= internals->nb_queues)
+ return -1;
+
+ ret = tx_setup_queue(dev, internals, tx_queue_id);
+ if (ret == -1)
+ return -1;
+
+ RTE_LOG(DEBUG, PMD, " TX TAP device name %s, qid %d on fd %d\n",
+ internals->name, tx_queue_id, internals->txq[tx_queue_id].fd);
+
+ return 0;
+}
+
+static int
+tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifreq ifr = { .ifr_mtu = mtu };
+ int err = 0;
+
+ err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
+ if (!err)
+ dev->data->mtu = mtu;
+
+ return err;
+}
+
+static int
+tap_set_mc_addr_list(struct rte_eth_dev *dev __rte_unused,
+ struct ether_addr *mc_addr_set __rte_unused,
+ uint32_t nb_mc_addr __rte_unused)
+{
+ /*
+ * Nothing to do here: the tap interface does no filtering, so every
+ * packet is received.
+ */
+ return 0;
+}
+
+static int
+tap_nl_msg_handler(struct nlmsghdr *nh, void *arg)
+{
+ struct rte_eth_dev *dev = arg;
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct ifinfomsg *info = NLMSG_DATA(nh);
+
+ if (nh->nlmsg_type != RTM_NEWLINK ||
+ (info->ifi_index != pmd->if_index &&
+ info->ifi_index != pmd->remote_if_index))
+ return 0;
+ return tap_link_update(dev, 0);
+}
+
+static void
+tap_dev_intr_handler(void *cb_arg)
+{
+ struct rte_eth_dev *dev = cb_arg;
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ nl_recv(pmd->intr_handle.fd, tap_nl_msg_handler, dev);
+}
+
+static int
+tap_intr_handle_set(struct rte_eth_dev *dev, int set)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ /* In any case, disable the interrupt if LSC is no longer requested. */
+ if (!dev->data->dev_conf.intr_conf.lsc) {
+ if (pmd->intr_handle.fd != -1)
+ nl_final(pmd->intr_handle.fd);
+ rte_intr_callback_unregister(
+ &pmd->intr_handle, tap_dev_intr_handler, dev);
+ return 0;
+ }
+ if (set) {
+ pmd->intr_handle.fd = nl_init(RTMGRP_LINK);
+ if (unlikely(pmd->intr_handle.fd == -1))
+ return -EBADF;
+ return rte_intr_callback_register(
+ &pmd->intr_handle, tap_dev_intr_handler, dev);
+ }
+ nl_final(pmd->intr_handle.fd);
+ return rte_intr_callback_unregister(&pmd->intr_handle,
+ tap_dev_intr_handler, dev);
+}
+
+static const uint32_t *
+tap_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
+{
+ static const uint32_t ptypes[] = {
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L2_ETHER_VLAN,
+ RTE_PTYPE_INNER_L2_ETHER_QINQ,
+ RTE_PTYPE_INNER_L3_IPV4,
+ RTE_PTYPE_INNER_L3_IPV4_EXT,
+ RTE_PTYPE_INNER_L3_IPV6,
+ RTE_PTYPE_INNER_L3_IPV6_EXT,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_SCTP,
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L2_ETHER_QINQ,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_SCTP,
+ };
+
+ return ptypes;
+}
+
+static int
+tap_flow_ctrl_get(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ fc_conf->mode = RTE_FC_NONE;
+ return 0;
+}
+
+static int
+tap_flow_ctrl_set(struct rte_eth_dev *dev __rte_unused,
+ struct rte_eth_fc_conf *fc_conf)
+{
+ if (fc_conf->mode != RTE_FC_NONE)
+ return -ENOTSUP;
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = tap_dev_start,
+ .dev_stop = tap_dev_stop,
+ .dev_close = tap_dev_close,
+ .dev_configure = tap_dev_configure,
+ .dev_infos_get = tap_dev_info,
+ .rx_queue_setup = tap_rx_queue_setup,
+ .tx_queue_setup = tap_tx_queue_setup,
+ .rx_queue_release = tap_rx_queue_release,
+ .tx_queue_release = tap_tx_queue_release,
+ .flow_ctrl_get = tap_flow_ctrl_get,
+ .flow_ctrl_set = tap_flow_ctrl_set,
+ .link_update = tap_link_update,
+ .dev_set_link_up = tap_link_set_up,
+ .dev_set_link_down = tap_link_set_down,
+ .promiscuous_enable = tap_promisc_enable,
+ .promiscuous_disable = tap_promisc_disable,
+ .allmulticast_enable = tap_allmulti_enable,
+ .allmulticast_disable = tap_allmulti_disable,
+ .mac_addr_set = tap_mac_set,
+ .mtu_set = tap_mtu_set,
+ .set_mc_addr_list = tap_set_mc_addr_list,
+ .stats_get = tap_stats_get,
+ .stats_reset = tap_stats_reset,
+ .dev_supported_ptypes_get = tap_dev_supported_ptypes_get,
+ .filter_ctrl = tap_dev_filter_ctrl,
+};
+
+static int
+tap_kernel_support(struct pmd_internals *pmd)
+{
+ struct utsname utsname;
+ int ver[3];
+
+ if (uname(&utsname) == -1 ||
+ sscanf(utsname.release, "%d.%d.%d",
+ &ver[0], &ver[1], &ver[2]) != 3)
+ return 0;
+ if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >= FLOWER_KERNEL_VERSION)
+ pmd->flower_support = 1;
+ if (KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
+ FLOWER_VLAN_KERNEL_VERSION)
+ pmd->flower_vlan_support = 1;
+ return 1;
+}
+
+static int
+eth_dev_tap_create(struct rte_vdev_device *vdev, char *tap_name,
+ char *remote_iface)
+{
+ int numa_node = rte_socket_id();
+ struct rte_eth_dev *dev;
+ struct pmd_internals *pmd;
+ struct rte_eth_dev_data *data;
+ int i;
+
+ RTE_LOG(DEBUG, PMD, " TAP device on numa %u\n", rte_socket_id());
+
+ data = rte_zmalloc_socket(tap_name, sizeof(*data), 0, numa_node);
+ if (!data) {
+ RTE_LOG(ERR, PMD, "TAP Failed to allocate data\n");
+ goto error_exit;
+ }
+
+ dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
+ if (!dev) {
+ RTE_LOG(ERR, PMD, "TAP Unable to allocate device struct\n");
+ goto error_exit;
+ }
+
+ pmd = dev->data->dev_private;
+ snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
+ pmd->nb_queues = RTE_PMD_TAP_MAX_QUEUES;
+
+ pmd->ioctl_sock = socket(AF_INET, SOCK_DGRAM, 0);
+ if (pmd->ioctl_sock == -1) {
+ RTE_LOG(ERR, PMD,
+ "TAP Unable to get a socket for management: %s\n",
+ strerror(errno));
+ goto error_exit;
+ }
+
+ /* Set up some default values */
+ rte_memcpy(data, dev->data, sizeof(*data));
+ data->dev_private = pmd;
+ data->dev_flags = RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
+ data->numa_node = numa_node;
+ data->drv_name = pmd_tap_drv.driver.name;
+
+ data->dev_link = pmd_link;
+ data->mac_addrs = &pmd->eth_addr;
+ data->nb_rx_queues = pmd->nb_queues;
+ data->nb_tx_queues = pmd->nb_queues;
+
+ dev->data = data;
+ dev->dev_ops = &ops;
+ dev->rx_pkt_burst = pmd_rx_burst;
+ dev->tx_pkt_burst = pmd_tx_burst;
+
+ pmd->intr_handle.type = RTE_INTR_HANDLE_EXT;
+ pmd->intr_handle.fd = -1;
+
+ /* Pre-set the fds to -1 to mark them as not valid */
+ for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
+ pmd->rxq[i].fd = -1;
+ pmd->txq[i].fd = -1;
+ }
+
+ tap_kernel_support(pmd);
+ if (!pmd->flower_support)
+ return 0;
+ LIST_INIT(&pmd->flows);
+ /*
+ * If no netlink socket can be created, creating or destroying flow
+ * rules will fail later on.
+ */
+ pmd->nlsk_fd = nl_init(0);
+ if (strlen(remote_iface)) {
+ struct ifreq ifr;
+
+ pmd->remote_if_index = if_nametoindex(remote_iface);
+ snprintf(pmd->remote_iface, RTE_ETH_NAME_MAX_LEN,
+ "%s", remote_iface);
+ if (!pmd->remote_if_index) {
+ RTE_LOG(ERR, PMD, "Could not find %s ifindex: "
+ "remote interface will remain unconfigured\n",
+ remote_iface);
+ return 0;
+ }
+ if (tap_ioctl(pmd, SIOCGIFHWADDR, &ifr, 0, REMOTE_ONLY) < 0) {
+ RTE_LOG(ERR, PMD, "Could not get remote MAC address\n");
+ goto error_exit;
+ }
+ rte_memcpy(&pmd->eth_addr, ifr.ifr_hwaddr.sa_data,
+ ETHER_ADDR_LEN);
+ } else {
+ eth_random_addr((uint8_t *)&pmd->eth_addr);
+ }
+
+ return 0;
+
+error_exit:
+ RTE_LOG(DEBUG, PMD, "TAP Unable to initialize %s\n",
+ rte_vdev_device_name(vdev));
+
+ rte_free(data);
+ return -EINVAL;
+}
+
+static int
+set_interface_name(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ char *name = (char *)extra_args;
+
+ if (value)
+ snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s", value);
+ else
+ snprintf(name, RTE_ETH_NAME_MAX_LEN - 1, "%s%d",
+ DEFAULT_TAP_NAME, (tap_unit - 1));
+
+ return 0;
+}
+
+static int
+set_interface_speed(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ *(int *)extra_args = (value) ? atoi(value) : ETH_SPEED_NUM_10G;
+
+ return 0;
+}
+
+static int
+set_remote_iface(const char *key __rte_unused,
+ const char *value,
+ void *extra_args)
+{
+ char *name = (char *)extra_args;
+
+ if (value)
+ snprintf(name, RTE_ETH_NAME_MAX_LEN, "%s", value);
+
+ return 0;
+}
+
+/* Open a TAP interface device.
+ */
+static int
+rte_pmd_tap_probe(struct rte_vdev_device *dev)
+{
+ const char *name, *params;
+ int ret;
+ struct rte_kvargs *kvlist = NULL;
+ int speed;
+ char tap_name[RTE_ETH_NAME_MAX_LEN];
+ char remote_iface[RTE_ETH_NAME_MAX_LEN];
+
+ name = rte_vdev_device_name(dev);
+ params = rte_vdev_device_args(dev);
+
+ speed = ETH_SPEED_NUM_10G;
+ snprintf(tap_name, sizeof(tap_name), "%s%d",
+ DEFAULT_TAP_NAME, tap_unit++);
+ memset(remote_iface, 0, RTE_ETH_NAME_MAX_LEN);
+
+ if (params && (params[0] != '\0')) {
+ RTE_LOG(DEBUG, PMD, "paramaters (%s)\n", params);
+
+ kvlist = rte_kvargs_parse(params, valid_arguments);
+ if (kvlist) {
+ if (rte_kvargs_count(kvlist, ETH_TAP_SPEED_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_SPEED_ARG,
+ &set_interface_speed,
+ &speed);
+ if (ret == -1)
+ goto leave;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_IFACE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_IFACE_ARG,
+ &set_interface_name,
+ tap_name);
+ if (ret == -1)
+ goto leave;
+ }
+
+ if (rte_kvargs_count(kvlist, ETH_TAP_REMOTE_ARG) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ ETH_TAP_REMOTE_ARG,
+ &set_remote_iface,
+ remote_iface);
+ if (ret == -1)
+ goto leave;
+ }
+ }
+ }
+ pmd_link.link_speed = speed;
+
+ RTE_LOG(NOTICE, PMD, "Initializing pmd_tap for %s as %s\n",
+ name, tap_name);
+
+ ret = eth_dev_tap_create(dev, tap_name, remote_iface);
+
+leave:
+ if (ret == -1) {
+ RTE_LOG(ERR, PMD, "Failed to create pmd for %s as %s\n",
+ name, tap_name);
+ tap_unit--; /* Restore the unit number */
+ }
+ rte_kvargs_free(kvlist);
+
+ return ret;
+}
+
+/* Detach a TAP device.
+ */
+static int
+rte_pmd_tap_remove(struct rte_vdev_device *dev)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+ struct pmd_internals *internals;
+ int i;
+
+ RTE_LOG(DEBUG, PMD, "Closing TUN/TAP Ethernet device on numa %u\n",
+ rte_socket_id());
+
+ /* find the ethdev entry */
+ eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
+ if (!eth_dev)
+ return 0;
+
+ internals = eth_dev->data->dev_private;
+ if (internals->flower_support && internals->nlsk_fd) {
+ tap_flow_flush(eth_dev, NULL);
+ tap_flow_implicit_flush(internals, NULL);
+ nl_final(internals->nlsk_fd);
+ }
+ for (i = 0; i < internals->nb_queues; i++)
+ if (internals->rxq[i].fd != -1)
+ close(internals->rxq[i].fd);
+
+ close(internals->ioctl_sock);
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
+static struct rte_vdev_driver pmd_tap_drv = {
+ .probe = rte_pmd_tap_probe,
+ .remove = rte_pmd_tap_remove,
+};
+RTE_PMD_REGISTER_VDEV(net_tap, pmd_tap_drv);
+RTE_PMD_REGISTER_ALIAS(net_tap, eth_tap);
+RTE_PMD_REGISTER_PARAM_STRING(net_tap,
+ ETH_TAP_IFACE_ARG "=<string> "
+ ETH_TAP_SPEED_ARG "=<int> "
+ ETH_TAP_REMOTE_ARG "=<string>");
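/*
 * Illustrative usage (not part of the patch): assuming the ETH_TAP_*_ARG
 * macros above expand to the documented "iface", "speed" and "remote"
 * keywords, a TAP port backed by a kernel netdevice named dpdktap0 and
 * mirroring a physical interface eth0 could be instantiated by passing an
 * EAL --vdev argument, e.g.:
 *
 *   testpmd -l 0-1 -n 4 --vdev=net_tap0,iface=dpdktap0,remote=eth0 -- -i
 *
 * The interface and remote names here are hypothetical examples.
 */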
diff --git a/drivers/net/tap/rte_eth_tap.h b/drivers/net/tap/rte_eth_tap.h
new file mode 100644
index 00000000..ad497b3d
--- /dev/null
+++ b/drivers/net/tap/rte_eth_tap.h
@@ -0,0 +1,100 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ETH_TAP_H_
+#define _RTE_ETH_TAP_H_
+
+#include <sys/queue.h>
+#include <sys/uio.h>
+#include <inttypes.h>
+
+#include <linux/if_tun.h>
+
+#include <rte_ethdev.h>
+#include <rte_ether.h>
+
+#ifdef IFF_MULTI_QUEUE
+#define RTE_PMD_TAP_MAX_QUEUES 16
+#else
+#define RTE_PMD_TAP_MAX_QUEUES 1
+#endif
+
+struct pkt_stats {
+ uint64_t opackets; /* Number of output packets */
+ uint64_t ipackets; /* Number of input packets */
+ uint64_t obytes; /* Number of bytes on output */
+ uint64_t ibytes; /* Number of bytes on input */
+ uint64_t errs; /* Number of TX error packets */
+ uint64_t ierrors; /* Number of RX error packets */
+ uint64_t rx_nombuf; /* Nb of RX mbuf alloc failures */
+};
+
+struct rx_queue {
+ struct rte_mempool *mp; /* Mempool for RX packets */
+ uint32_t trigger_seen; /* Last seen Rx trigger value */
+ uint16_t in_port; /* Port ID */
+ int fd;
+ struct pkt_stats stats; /* Stats for this RX queue */
+ uint16_t nb_rx_desc; /* max number of mbufs available */
+ struct rte_eth_rxmode *rxmode; /* RX features */
+ struct rte_mbuf *pool; /* mbufs pool for this queue */
+ struct iovec (*iovecs)[]; /* descriptors for this queue */
+ struct tun_pi pi; /* packet info for iovecs */
+};
+
+struct tx_queue {
+ int fd;
+ uint16_t *mtu; /* Pointer to MTU from dev_data */
+ struct pkt_stats stats; /* Stats for this TX queue */
+};
+
+struct pmd_internals {
+ char remote_iface[RTE_ETH_NAME_MAX_LEN]; /* Remote netdevice name */
+ char name[RTE_ETH_NAME_MAX_LEN]; /* Internal Tap device name */
+ uint16_t nb_queues; /* Number of queues supported */
+ struct ether_addr eth_addr; /* Mac address of the device port */
+ int remote_if_index; /* remote netdevice IF_INDEX */
+ int if_index; /* IF_INDEX for the port */
+ int ioctl_sock; /* socket for ioctl calls */
+ int nlsk_fd; /* Netlink socket fd */
+ int flower_support; /* 1 if kernel supports, else 0 */
+ int flower_vlan_support; /* 1 if kernel supports, else 0 */
+ LIST_HEAD(tap_flows, rte_flow) flows; /* rte_flow rules */
+ /* implicit rte_flow rules set when a remote device is active */
+ LIST_HEAD(tap_implicit_flows, rte_flow) implicit_flows;
+ struct rx_queue rxq[RTE_PMD_TAP_MAX_QUEUES]; /* List of RX queues */
+ struct tx_queue txq[RTE_PMD_TAP_MAX_QUEUES]; /* List of TX queues */
+ struct rte_intr_handle intr_handle; /* LSC interrupt handle. */
+};
+
+#endif /* _RTE_ETH_TAP_H_ */
diff --git a/drivers/net/tap/rte_pmd_tap_version.map b/drivers/net/tap/rte_pmd_tap_version.map
new file mode 100644
index 00000000..31eca32e
--- /dev/null
+++ b/drivers/net/tap/rte_pmd_tap_version.map
@@ -0,0 +1,4 @@
+DPDK_17.02 {
+
+ local: *;
+};
diff --git a/drivers/net/tap/tap_flow.c b/drivers/net/tap/tap_flow.c
new file mode 100644
index 00000000..cf1c8a26
--- /dev/null
+++ b/drivers/net/tap/tap_flow.c
@@ -0,0 +1,1507 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_byteorder.h>
+#include <rte_jhash.h>
+#include <rte_malloc.h>
+#include <rte_eth_tap.h>
+#include <tap_flow.h>
+#include <tap_autoconf.h>
+#include <tap_tcmsgs.h>
+
+#ifndef HAVE_TC_FLOWER
+/*
+ * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
+ * avoid sending TC messages the kernel cannot understand.
+ */
+enum {
+ TCA_FLOWER_UNSPEC,
+ TCA_FLOWER_CLASSID,
+ TCA_FLOWER_INDEV,
+ TCA_FLOWER_ACT,
+ TCA_FLOWER_KEY_ETH_DST, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_DST_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_SRC_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ETH_TYPE, /* be16 */
+ TCA_FLOWER_KEY_IP_PROTO, /* u8 */
+ TCA_FLOWER_KEY_IPV4_SRC, /* be32 */
+ TCA_FLOWER_KEY_IPV4_SRC_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST, /* be32 */
+ TCA_FLOWER_KEY_IPV4_DST_MASK, /* be32 */
+ TCA_FLOWER_KEY_IPV6_SRC, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_SRC_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST, /* struct in6_addr */
+ TCA_FLOWER_KEY_IPV6_DST_MASK, /* struct in6_addr */
+ TCA_FLOWER_KEY_TCP_SRC, /* be16 */
+ TCA_FLOWER_KEY_TCP_DST, /* be16 */
+ TCA_FLOWER_KEY_UDP_SRC, /* be16 */
+ TCA_FLOWER_KEY_UDP_DST, /* be16 */
+};
+#endif
+#ifndef HAVE_TC_VLAN_ID
+enum {
+ /* TCA_FLOWER_FLAGS, */
+ TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
+ TCA_FLOWER_KEY_VLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE, /* be16 */
+};
+#endif
+
+struct rte_flow {
+ LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
+ struct rte_flow *remote_flow; /* associated remote flow */
+ struct nlmsg msg;
+};
+
+struct convert_data {
+ uint16_t eth_type;
+ uint16_t ip_proto;
+ uint8_t vlan;
+ struct rte_flow *flow;
+};
+
+struct remote_rule {
+ struct rte_flow_attr attr;
+ struct rte_flow_item items[2];
+ int mirred;
+};
+
+static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
+static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
+static int
+tap_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+static struct rte_flow *
+tap_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error);
+
+static int
+tap_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error);
+
+static const struct rte_flow_ops tap_flow_ops = {
+ .validate = tap_flow_validate,
+ .create = tap_flow_create,
+ .destroy = tap_flow_destroy,
+ .flush = tap_flow_flush,
+};
+
+/* Static initializer for items. */
+#define ITEMS(...) \
+ (const enum rte_flow_item_type []){ \
+ __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
+ }
+
+/* Structure to generate a simple graph of layers supported by the NIC. */
+struct tap_flow_items {
+ /* Bit-mask corresponding to what is supported for this item. */
+ const void *mask;
+ const unsigned int mask_sz; /* Bit-mask size in bytes. */
+ /*
+ * Bit-mask corresponding to the default mask, if none is provided
+ * along with the item.
+ */
+ const void *default_mask;
+ /**
+ * Conversion function from rte_flow to netlink attributes.
+ *
+ * @param item
+ * rte_flow item to convert.
+ * @param data
+ * Internal structure to store the conversion.
+ *
+ * @return
+ * 0 on success, negative value otherwise.
+ */
+ int (*convert)(const struct rte_flow_item *item, void *data);
+ /** List of possible following items. */
+ const enum rte_flow_item_type *const items;
+};
+
+/* Graph of supported items and associated actions. */
+static const struct tap_flow_items tap_flow_items[] = {
+ [RTE_FLOW_ITEM_TYPE_END] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
+ },
+ [RTE_FLOW_ITEM_TYPE_ETH] = {
+ .items = ITEMS(
+ RTE_FLOW_ITEM_TYPE_VLAN,
+ RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = -1,
+ },
+ .mask_sz = sizeof(struct rte_flow_item_eth),
+ .default_mask = &rte_flow_item_eth_mask,
+ .convert = tap_flow_create_eth,
+ },
+ [RTE_FLOW_ITEM_TYPE_VLAN] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
+ RTE_FLOW_ITEM_TYPE_IPV6),
+ .mask = &(const struct rte_flow_item_vlan){
+ .tpid = -1,
+ /* DEI matching is not supported */
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ .tci = 0xffef,
+#else
+ .tci = 0xefff,
+#endif
+ },
+ .mask_sz = sizeof(struct rte_flow_item_vlan),
+ .default_mask = &rte_flow_item_vlan_mask,
+ .convert = tap_flow_create_vlan,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV4] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask = &(const struct rte_flow_item_ipv4){
+ .hdr = {
+ .src_addr = -1,
+ .dst_addr = -1,
+ .next_proto_id = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_ipv4),
+ .default_mask = &rte_flow_item_ipv4_mask,
+ .convert = tap_flow_create_ipv4,
+ },
+ [RTE_FLOW_ITEM_TYPE_IPV6] = {
+ .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
+ RTE_FLOW_ITEM_TYPE_TCP),
+ .mask = &(const struct rte_flow_item_ipv6){
+ .hdr = {
+ .src_addr = {
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .dst_addr = {
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ },
+ .proto = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_ipv6),
+ .default_mask = &rte_flow_item_ipv6_mask,
+ .convert = tap_flow_create_ipv6,
+ },
+ [RTE_FLOW_ITEM_TYPE_UDP] = {
+ .mask = &(const struct rte_flow_item_udp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_udp),
+ .default_mask = &rte_flow_item_udp_mask,
+ .convert = tap_flow_create_udp,
+ },
+ [RTE_FLOW_ITEM_TYPE_TCP] = {
+ .mask = &(const struct rte_flow_item_tcp){
+ .hdr = {
+ .src_port = -1,
+ .dst_port = -1,
+ },
+ },
+ .mask_sz = sizeof(struct rte_flow_item_tcp),
+ .default_mask = &rte_flow_item_tcp_mask,
+ .convert = tap_flow_create_tcp,
+ },
+};
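/*
 * Illustrative sketch (not from the patch): given the item graph above, a
 * rule matching UDP destination port 5000 over IPv4 and steering it to RX
 * queue 1 could be built by an application roughly as follows. All names are
 * standard rte_flow API; the port_id value is hypothetical.
 */
#if 0
	uint8_t port_id = 0; /* hypothetical port */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_udp udp_spec = {
		.hdr = { .dst_port = rte_cpu_to_be_16(5000) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
					     actions, &err);
#endif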
+
+static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
+ [TAP_REMOTE_LOCAL_MAC] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_REDIR,
+ },
+ [TAP_REMOTE_BROADCAST] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_BROADCASTV6] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_PROMISC] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_ALLMULTI] = {
+ .attr = {
+ .group = MAX_GROUP,
+ .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
+ .ingress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .mask = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ },
+ .spec = &(const struct rte_flow_item_eth){
+ .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+ },
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+ [TAP_REMOTE_TX] = {
+ .attr = {
+ .group = 0,
+ .priority = TAP_REMOTE_TX,
+ .egress = 1,
+ },
+ .items[0] = {
+ .type = RTE_FLOW_ITEM_TYPE_VOID,
+ },
+ .items[1] = {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ .mirred = TCA_EGRESS_MIRROR,
+ },
+};
+
+/**
+ * Make as many checks as possible on an Ethernet item, and if a flow is
+ * provided, fill it appropriately with Ethernet info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_eth(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
+ /* TC does not support eth_type masking. Only accept if exact match. */
+ if (mask->type && mask->type != 0xffff)
+ return -1;
+ if (!spec)
+ return 0;
+ /* store eth_type for consistency if ipv4/6 pattern item comes next */
+ if (spec->type & mask->type)
+ info->eth_type = spec->type;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (spec->type & mask->type)
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info,
+ (spec->type & mask->type));
+ if (!is_zero_ether_addr(&spec->dst)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
+ &spec->dst.addr_bytes);
+ nlattr_add(&msg->nh,
+ TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
+ &mask->dst.addr_bytes);
+ }
+ if (!is_zero_ether_addr(&mask->src)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
+ &spec->src.addr_bytes);
+ nlattr_add(&msg->nh,
+ TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
+ &mask->src.addr_bytes);
+ }
+ return 0;
+}
+
+/**
+ * Make as many checks as possible on a VLAN item, and if a flow is provided,
+ * fill it appropriately with VLAN info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
+ /* TC does not support tpid masking. Only accept if exact match. */
+ if (mask->tpid && mask->tpid != 0xffff)
+ return -1;
+ /* Double-tagging not supported. */
+ if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
+ return -1;
+ info->vlan = 1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
+#define VLAN_PRIO(tci) ((tci) >> 13)
+#define VLAN_ID(tci) ((tci) & 0xfff)
+ if (!spec)
+ return 0;
+ if (spec->tci) {
+ uint16_t tci = ntohs(spec->tci) & mask->tci;
+ uint16_t prio = VLAN_PRIO(tci);
+ uint8_t vid = VLAN_ID(tci);
+
+ if (prio)
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_VLAN_PRIO, prio);
+ if (vid)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_VLAN_ID, vid);
+ }
+ return 0;
+}
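/*
 * Worked example (illustrative): for a host-order TCI of 0x6005, the macros
 * above give VLAN_PRIO(0x6005) == 3 and VLAN_ID(0x6005) == 5, so the rule
 * carries TCA_FLOWER_KEY_VLAN_PRIO = 3 and TCA_FLOWER_KEY_VLAN_ID = 5.
 */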
+
+/**
+ * Make as many checks as possible on an IPv4 item, and if a flow is provided,
+ * fill it appropriately with IPv4 info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
+ /* check that previous eth type is compatible with ipv4 */
+ if (info->eth_type && info->eth_type != htons(ETH_P_IP))
+ return -1;
+ /* store ip_proto for consistency if udp/tcp pattern item comes next */
+ if (spec)
+ info->ip_proto = spec->hdr.next_proto_id;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!info->eth_type)
+ info->eth_type = htons(ETH_P_IP);
+ if (!info->vlan)
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IP));
+ if (!spec)
+ return 0;
+ if (spec->hdr.dst_addr) {
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
+ spec->hdr.dst_addr);
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask->hdr.dst_addr);
+ }
+ if (spec->hdr.src_addr) {
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
+ spec->hdr.src_addr);
+ nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask->hdr.src_addr);
+ }
+ if (spec->hdr.next_proto_id)
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
+ spec->hdr.next_proto_id);
+ return 0;
+}
+
+/**
+ * Make as many checks as possible on an IPv6 item, and if a flow is provided,
+ * fill it appropriately with IPv6 info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ uint8_t empty_addr[16] = { 0 };
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
+ /* check that previous eth type is compatible with ipv6 */
+ if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
+ return -1;
+ /* store ip_proto for consistency if udp/tcp pattern item comes next */
+ if (spec)
+ info->ip_proto = spec->hdr.proto;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ if (!info->eth_type)
+ info->eth_type = htons(ETH_P_IPV6);
+ if (!info->vlan)
+ msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IPV6));
+ if (!spec)
+ return 0;
+ if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
+ sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
+ sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
+ }
+ if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
+ sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
+ nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
+ }
+ if (spec->hdr.proto)
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
+ return 0;
+}
+
+/**
+ * Make as many checks as possible on a UDP item, and if a flow is provided,
+ * fill it appropriately with UDP info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_udp(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
+ /* check that previous ip_proto is compatible with udp */
+ if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
+ return -1;
+ /* TC does not support UDP port masking. Only accept if exact match. */
+ if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
+ (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
+ return -1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
+ if (!spec)
+ return 0;
+ if (spec->hdr.dst_port & mask->hdr.dst_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
+ spec->hdr.dst_port);
+ if (spec->hdr.src_port & mask->hdr.src_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
+ spec->hdr.src_port);
+ return 0;
+}
+
+/**
+ * Make as many checks as possible on a TCP item, and if a flow is provided,
+ * fill it appropriately with TCP info.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] data
+ * Additional data structure to tell next layers we've been here.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
+{
+ struct convert_data *info = (struct convert_data *)data;
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ struct rte_flow *flow = info->flow;
+ struct nlmsg *msg;
+
+ /* use default mask if none provided */
+ if (!mask)
+ mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
+ /* check that previous ip_proto is compatible with tcp */
+ if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
+ return -1;
+ /* TC does not support TCP port masking. Only accept if exact match. */
+ if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
+ (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
+ return -1;
+ if (!flow)
+ return 0;
+ msg = &flow->msg;
+ nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
+ if (!spec)
+ return 0;
+ if (spec->hdr.dst_port & mask->hdr.dst_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
+ spec->hdr.dst_port);
+ if (spec->hdr.src_port & mask->hdr.src_port)
+ nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
+ spec->hdr.src_port);
+ return 0;
+}
+
+/**
+ * Check support for a given item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param size
+ * Bit-Mask size in bytes.
+ * @param[in] supported_mask
+ * Bit-mask covering supported fields to compare with spec, last and mask in
+ * \item.
+ * @param[in] default_mask
+ * Bit-mask default mask if none is provided in \item.
+ *
+ * @return
+ * 0 on success.
+ */
+static int
+tap_flow_item_validate(const struct rte_flow_item *item,
+ unsigned int size,
+ const uint8_t *supported_mask,
+ const uint8_t *default_mask)
+{
+ int ret = 0;
+
+ /* An empty layer is allowed, as long as all fields are NULL */
+ if (!item->spec && (item->mask || item->last))
+ return -1;
+ /* Is the item spec compatible with what the NIC supports? */
+ if (item->spec && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->spec;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ /* Is the default mask compatible with what the NIC supports? */
+ for (i = 0; i < size; i++)
+ if ((default_mask[i] | supported_mask[i]) !=
+ supported_mask[i])
+ return -1;
+ }
+ /* Is the item last compatible with what the NIC supports? */
+ if (item->last && !item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->last;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ }
+ /* Is the item mask compatible with what the NIC supports? */
+ if (item->mask) {
+ unsigned int i;
+ const uint8_t *spec = item->mask;
+
+ for (i = 0; i < size; ++i)
+ if ((spec[i] | supported_mask[i]) != supported_mask[i])
+ return -1;
+ }
+ /**
+ * Once masked, are item spec and item last equal?
+ * TC does not support ranges, so anything else is invalid.
+ */
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ const uint8_t *apply = default_mask;
+ unsigned int i;
+
+ if (item->mask)
+ apply = item->mask;
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
+ last[i] = ((const uint8_t *)item->last)[i] & apply[i];
+ }
+ ret = memcmp(spec, last, size);
+ }
+ return ret;
+}
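/*
 * Worked example (illustrative): with a UDP item where spec->hdr.dst_port and
 * last->hdr.dst_port are both 80 under the default full mask, the masked spec
 * and last compare equal and the item is accepted; if last->hdr.dst_port were
 * 90 instead, the final memcmp() would differ and the item is rejected, since
 * TC flower cannot express ranges.
 */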
+
+/**
+ * Transform a DROP/PASSTHRU action item in the provided flow for TC.
+ *
+ * @param[in, out] flow
+ * Flow to be filled.
+ * @param[in] action
+ * Appropriate action to be set in the TCA_GACT_PARMS structure.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+add_action_gact(struct rte_flow *flow, int action)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ struct tc_gact p = {
+ .action = action
+ };
+
+ if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ return -1;
+ if (nlattr_nested_start(msg, act_index++) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("gact"), "gact");
+ if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(p), &p);
+ nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ nlattr_nested_finish(msg); /* nested act_index */
+ nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
+
+/**
+ * Transform a MIRRED action item in the provided flow for TC.
+ *
+ * @param[in, out] flow
+ * Flow to be filled.
+ * @param[in] ifindex
+ * Netdevice ifindex, where to mirror/redirect packet to.
+ * @param[in] action_type
+ * Either TCA_EGRESS_REDIR for redirection or TCA_EGRESS_MIRROR for mirroring.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+add_action_mirred(struct rte_flow *flow, uint16_t ifindex, uint16_t action_type)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ struct tc_mirred p = {
+ .eaction = action_type,
+ .ifindex = ifindex,
+ };
+
+ if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ return -1;
+ if (nlattr_nested_start(msg, act_index++) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("mirred"), "mirred");
+ if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ if (action_type == TCA_EGRESS_MIRROR)
+ p.action = TC_ACT_PIPE;
+ else /* REDIRECT */
+ p.action = TC_ACT_STOLEN;
+ nlattr_add(&msg->nh, TCA_MIRRED_PARMS, sizeof(p), &p);
+ nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ nlattr_nested_finish(msg); /* nested act_index */
+ nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
+
+/**
+ * Transform a QUEUE action item in the provided flow for TC.
+ *
+ * @param[in, out] flow
+ * Flow to be filled.
+ * @param[in] queue
+ * Queue id to use.
+ *
+ * @return
+ * 0 if checks are alright, -1 otherwise.
+ */
+static int
+add_action_skbedit(struct rte_flow *flow, uint16_t queue)
+{
+ struct nlmsg *msg = &flow->msg;
+ size_t act_index = 1;
+ struct tc_skbedit p = {
+ .action = TC_ACT_PIPE
+ };
+
+ if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
+ return -1;
+ if (nlattr_nested_start(msg, act_index++) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("skbedit"), "skbedit");
+ if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
+ return -1;
+ nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS, sizeof(p), &p);
+ nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING, queue);
+ nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
+ nlattr_nested_finish(msg); /* nested act_index */
+ nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
+ return 0;
+}
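/*
 * For reference (illustrative): the three add_action_*() helpers above all
 * emit the same nested netlink attribute layout, differing only in the action
 * kind and its parameters, e.g. for add_action_skbedit():
 *
 *   TCA_FLOWER_ACT
 *     1 (act_index)
 *       TCA_ACT_KIND = "skbedit"
 *       TCA_ACT_OPTIONS
 *         TCA_SKBEDIT_PARMS (struct tc_skbedit)
 *         TCA_SKBEDIT_QUEUE_MAPPING (queue id)
 */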
+
+/**
+ * Validate a flow supported by TC.
+ * If flow param is not NULL, then also fill the netlink message inside.
+ *
+ * @param pmd
+ * Pointer to private structure.
+ * @param[in] attr
+ * Flow rule attributes.
+ * @param[in] pattern
+ * Pattern specification (list terminated by the END pattern item).
+ * @param[in] actions
+ * Associated actions (list terminated by the END action).
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ * @param[in, out] flow
+ * Flow structure to update.
+ * @param[in] mirred
+ * If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
+ * redirection to the tap netdevice, and the TC rule will be configured
+ * on the remote netdevice in pmd.
+ * If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
+ * mirroring to the tap netdevice, and the TC rule will be configured
+ * on the remote netdevice in pmd. Matching packets will thus be duplicated.
+ * If set to 0, the standard behavior is to be used: set correct actions for
+ * the TC rule, and apply it on the tap netdevice.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+priv_flow_process(struct pmd_internals *pmd,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct rte_flow *flow,
+ int mirred)
+{
+ const struct tap_flow_items *cur_item = tap_flow_items;
+ struct convert_data data = {
+ .eth_type = 0,
+ .ip_proto = 0,
+ .flow = flow,
+ };
+ int action = 0; /* Only one action authorized for now */
+
+ if (attr->group > MAX_GROUP) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL, "group value too big: cannot exceed 15");
+ return -rte_errno;
+ }
+ if (attr->priority > MAX_PRIORITY) {
+ rte_flow_error_set(
+ error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority value too big");
+ return -rte_errno;
+ } else if (flow) {
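+ /*
+  * Example of the resulting encoding (group 2, priority 0):
+  * prio = (2 << GROUP_SHIFT) | (0 + PRIORITY_OFFSET) = 0x2001,
+  * stored in the upper 16 bits of tcm_info.
+  */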
+ uint16_t group = attr->group << GROUP_SHIFT;
+ uint16_t prio = group | (attr->priority + PRIORITY_OFFSET);
+ flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
+ flow->msg.t.tcm_info);
+ }
+ if (flow) {
+ if (mirred) {
+ /*
+ * If attr->ingress, the rule applies on remote ingress
+ * to match incoming packets
+ * If attr->egress, the rule applies on tap ingress (as
+ * seen from the kernel) to deal with packets going out
+ * from the DPDK app.
+ */
+ flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
+ } else {
+ /* Standard rule on tap egress (kernel standpoint). */
+ flow->msg.t.tcm_parent =
+ TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+ }
+ /* use flower filter type */
+ nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
+ if (nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
+ goto exit_item_not_supported;
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
+ const struct tap_flow_items *token = NULL;
+ unsigned int i;
+ int err = 0;
+
+ if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
+ continue;
+ for (i = 0;
+ cur_item->items &&
+ cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
+ ++i) {
+ if (cur_item->items[i] == items->type) {
+ token = &tap_flow_items[items->type];
+ break;
+ }
+ }
+ if (!token)
+ goto exit_item_not_supported;
+ cur_item = token;
+ err = tap_flow_item_validate(
+ items, cur_item->mask_sz,
+ (const uint8_t *)cur_item->mask,
+ (const uint8_t *)cur_item->default_mask);
+ if (err)
+ goto exit_item_not_supported;
+ if (flow && cur_item->convert) {
+ if (!pmd->flower_vlan_support &&
+ cur_item->convert == tap_flow_create_vlan)
+ goto exit_item_not_supported;
+ err = cur_item->convert(items, &data);
+ if (err)
+ goto exit_item_not_supported;
+ }
+ }
+ if (flow) {
+ if (pmd->flower_vlan_support && data.vlan) {
+ nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ htons(ETH_P_8021Q));
+ nlattr_add16(&flow->msg.nh,
+ TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+ data.eth_type ?
+ data.eth_type : htons(ETH_P_ALL));
+ } else if (data.eth_type) {
+ nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
+ data.eth_type);
+ }
+ }
+ if (mirred && flow) {
+ uint16_t if_index = pmd->if_index;
+
+ /*
+ * If attr->egress && mirred, then this is a special
+ * case where the rule must be applied on the tap, to
+ * redirect packets coming from the DPDK App, out
+ * through the remote netdevice.
+ */
+ if (attr->egress)
+ if_index = pmd->remote_if_index;
+ if (add_action_mirred(flow, if_index, mirred) < 0)
+ goto exit_action_not_supported;
+ else
+ goto end;
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ int err = 0;
+
+ if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
+ continue;
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (flow)
+ err = add_action_gact(flow, TC_ACT_SHOT);
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (flow)
+ err = add_action_gact(flow, TC_ACT_UNSPEC);
+ } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+ const struct rte_flow_action_queue *queue =
+ (const struct rte_flow_action_queue *)
+ actions->conf;
+ if (action)
+ goto exit_action_not_supported;
+ action = 1;
+ if (!queue || (queue->index >= pmd->nb_queues))
+ goto exit_action_not_supported;
+ if (flow)
+ err = add_action_skbedit(flow, queue->index);
+ } else {
+ goto exit_action_not_supported;
+ }
+ if (err)
+ goto exit_action_not_supported;
+ }
+end:
+ if (flow)
+ nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
+ return 0;
+exit_item_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ return -rte_errno;
+exit_action_not_supported:
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action not supported");
+ return -rte_errno;
+}
+
+/**
+ * Validate a flow.
+ *
+ * @see rte_flow_validate()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_validate(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
+}
+
+/**
+ * Set a unique handle in a flow.
+ *
+ * The kernel supports TC rules with equal priority, as long as they use the
+ * same matching fields (e.g. dst mac and ipv4) with different values (and a
+ * full mask to ensure no collision is possible).
+ * Among such rules, the handle (uint32_t) is the part that uniquely
+ * identifies each rule.
+ *
+ * On 32-bit architectures, the handle can simply be the flow's pointer address.
+ * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
+ * unique handle.
+ *
+ * @param[in, out] flow
+ * The flow that needs its handle set.
+ */
+static void
+tap_flow_set_handle(struct rte_flow *flow)
+{
+ uint32_t handle = 0;
+
+ if (sizeof(flow) > 4)
+ handle = rte_jhash(&flow, sizeof(flow), 1);
+ else
+ handle = (uintptr_t)flow;
+ /* must be at least 1 to avoid letting the kernel choose one for us */
+ if (!handle)
+ handle = 1;
+ flow->msg.t.tcm_handle = handle;
+}
+
+/**
+ * Create a flow.
+ *
+ * @see rte_flow_create()
+ * @see rte_flow_ops
+ */
+static struct rte_flow *
+tap_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_flow *remote_flow = NULL;
+ struct rte_flow *flow = NULL;
+ struct nlmsg *msg = NULL;
+ int err;
+
+ if (!pmd->if_index) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL,
+ "can't create rule, ifindex not found");
+ goto fail;
+ }
+ /*
+ * No rules configured through standard rte_flow should be set on the
+ * priorities used by implicit rules.
+ */
+ if ((attr->group == MAX_GROUP) &&
+ attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL, "priority value too big");
+ goto fail;
+ }
+ flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!flow) {
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &flow->msg;
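+ /*
+  * NLM_F_EXCL | NLM_F_CREATE: ask the kernel to create the filter and
+  * fail with EEXIST if an identical one already exists.
+  */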
+ tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(flow);
+ if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
+ goto fail;
+ err = nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "couldn't send request to kernel");
+ goto fail;
+ }
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "overlapping rules");
+ goto fail;
+ }
+ LIST_INSERT_HEAD(&pmd->flows, flow, next);
+ /**
+ * If a remote device is configured, a TC rule with identical items for
+ * matching must be set on that device, with a single action: redirect
+ * to the local pmd->if_index.
+ */
+ if (pmd->remote_if_index) {
+ remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!remote_flow) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &remote_flow->msg;
+ /* set the rule if_index for the remote netdevice */
+ tc_init_msg(
+ msg, pmd->remote_if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, NULL,
+ error, remote_flow, TCA_EGRESS_REDIR)) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "rte flow rule validation failed");
+ goto fail;
+ }
+ err = nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure sending nl request");
+ goto fail;
+ }
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "overlapping rules");
+ goto fail;
+ }
+ flow->remote_flow = remote_flow;
+ }
+ return flow;
+fail:
+ if (remote_flow)
+ rte_free(remote_flow);
+ if (flow)
+ rte_free(flow);
+ return NULL;
+}
+
+/**
+ * Destroy a flow using a pointer to pmd_internals.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ * @param[in] flow
+ * Pointer to the flow to destroy.
+ * @param[in, out] error
+ * Pointer to the flow error handler.
+ *
+ * @return 0 if the flow could be destroyed, -1 otherwise.
+ */
+static int
+tap_flow_destroy_pmd(struct pmd_internals *pmd,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *remote_flow = flow->remote_flow;
+ int ret = 0;
+
+ LIST_REMOVE(flow, next);
+ flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
+
+ ret = nl_send(pmd->nlsk_fd, &flow->msg.nh);
+ if (ret < 0) {
+ rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "couldn't send request to kernel");
+ goto end;
+ }
+ ret = nl_recv_ack(pmd->nlsk_fd);
+ /* If errno is ENOENT, the rule is no longer present in the kernel. */
+ if (ret < 0 && errno == ENOENT)
+ ret = 0;
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule deletion (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "couldn't receive kernel ack to our request");
+ goto end;
+ }
+ if (remote_flow) {
+ remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
+
+ ret = nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
+ if (ret < 0) {
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure sending nl request");
+ goto end;
+ }
+ ret = nl_recv_ack(pmd->nlsk_fd);
+ if (ret < 0 && errno == ENOENT)
+ ret = 0;
+ if (ret < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule deletion (%d): %s\n",
+ errno, strerror(errno));
+ rte_flow_error_set(
+ error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failure trying to receive nl ack");
+ goto end;
+ }
+ }
+end:
+ if (remote_flow)
+ rte_free(remote_flow);
+ rte_free(flow);
+ return ret;
+}
+
+/**
+ * Destroy a flow.
+ *
+ * @see rte_flow_destroy()
+ * @see rte_flow_ops
+ */
+static int
+tap_flow_destroy(struct rte_eth_dev *dev,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ return tap_flow_destroy_pmd(pmd, flow, error);
+}
+
+/**
+ * Destroy all flows.
+ *
+ * @see rte_flow_flush()
+ * @see rte_flow_ops
+ */
+int
+tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+ struct rte_flow *flow;
+
+ while (!LIST_EMPTY(&pmd->flows)) {
+ flow = LIST_FIRST(&pmd->flows);
+ if (tap_flow_destroy(dev, flow, error) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Add an implicit flow rule on the remote device to make sure traffic gets to
+ * the tap netdevice from there.
+ *
+ * @param pmd
+ * Pointer to private structure.
+ * @param[in] idx
+ * The idx in the implicit_rte_flows array specifying which rule to apply.
+ *
+ * @return -1 if the rule couldn't be applied, 0 otherwise.
+ */
+int tap_flow_implicit_create(struct pmd_internals *pmd,
+ enum implicit_rule_index idx)
+{
+ struct rte_flow_item *items = implicit_rte_flows[idx].items;
+ struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
+ struct rte_flow_item_eth eth_local = { .type = 0 };
+ uint16_t if_index = pmd->remote_if_index;
+ struct rte_flow *remote_flow = NULL;
+ struct nlmsg *msg = NULL;
+ int err = 0;
+ struct rte_flow_item items_local[2] = {
+ [0] = {
+ .type = items[0].type,
+ .spec = &eth_local,
+ .mask = items[0].mask,
+ },
+ [1] = {
+ .type = items[1].type,
+ }
+ };
+
+ remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
+ if (!remote_flow) {
+ RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow");
+ goto fail;
+ }
+ msg = &remote_flow->msg;
+ if (idx == TAP_REMOTE_TX) {
+ if_index = pmd->if_index;
+ } else if (idx == TAP_REMOTE_LOCAL_MAC) {
+ /*
+ * eth addr couldn't be set in implicit_rte_flows[] as it is not
+ * known at compile time.
+ */
+ memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
+ items = items_local;
+ }
+ tc_init_msg(msg, if_index, RTM_NEWTFILTER,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
+ tap_flow_set_handle(remote_flow);
+ if (priv_flow_process(pmd, attr, items, NULL, NULL,
+ remote_flow, implicit_rte_flows[idx].mirred)) {
+ RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
+ goto fail;
+ }
+ err = nl_send(pmd->nlsk_fd, &msg->nh);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD, "Failure sending nl request");
+ goto fail;
+ }
+ err = nl_recv_ack(pmd->nlsk_fd);
+ if (err < 0) {
+ RTE_LOG(ERR, PMD,
+ "Kernel refused TC filter rule creation (%d): %s\n",
+ errno, strerror(errno));
+ goto fail;
+ }
+ LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
+ return 0;
+fail:
+ if (remote_flow)
+ rte_free(remote_flow);
+ return -1;
+}
+
+/**
+ * Remove a specific implicit flow rule on the remote device.
+ *
+ * @param[in, out] pmd
+ * Pointer to private structure.
+ * @param[in] idx
+ * The idx in the implicit_rte_flows array specifying which rule to remove.
+ *
+ * @return -1 if the implicit rule couldn't be removed, 0 otherwise.
+ */
+int tap_flow_implicit_destroy(struct pmd_internals *pmd,
+ enum implicit_rule_index idx)
+{
+ struct rte_flow *remote_flow;
+ int cur_prio = -1;
+ int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
+
+ for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ remote_flow;
+ remote_flow = LIST_NEXT(remote_flow, next)) {
+ cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
+ if (cur_prio != idx_prio)
+ continue;
+ return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
+ }
+ return 0;
+}
+
+/**
+ * Destroy all implicit flows.
+ *
+ * @see rte_flow_flush()
+ */
+int
+tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
+{
+ struct rte_flow *remote_flow;
+
+ while (!LIST_EMPTY(&pmd->implicit_flows)) {
+ remote_flow = LIST_FIRST(&pmd->implicit_flows);
+ if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Manage filter operations.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param filter_type
+ * Filter type.
+ * @param filter_op
+ * Operation to perform.
+ * @param arg
+ * Pointer to operation-specific structure.
+ *
+ * @return
+ * 0 on success, negative errno value on failure.
+ */
+int
+tap_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct pmd_internals *pmd = dev->data->dev_private;
+
+ if (!pmd->flower_support)
+ return -ENOTSUP;
+ switch (filter_type) {
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &tap_flow_ops;
+ return 0;
+ default:
+ RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported",
+ (void *)dev, filter_type);
+ }
+ return -EINVAL;
+}
+
diff --git a/drivers/net/tap/tap_flow.h b/drivers/net/tap/tap_flow.h
new file mode 100644
index 00000000..94414f18
--- /dev/null
+++ b/drivers/net/tap/tap_flow.h
@@ -0,0 +1,82 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TAP_FLOW_H_
+#define _TAP_FLOW_H_
+
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_eth_tap.h>
+
+/**
+ * In TC, priority 0 means we require the kernel to allocate one for us.
+ * In rte_flow, however, we want the priority 0 to be the most important one.
+ * Use an offset so that the most important rte_flow priority maps to 1 in TC.
+ */
+#define PRIORITY_OFFSET 1
+#define PRIORITY_MASK (0xfff)
+#define MAX_PRIORITY (PRIORITY_MASK - PRIORITY_OFFSET)
+#define GROUP_MASK (0xf)
+#define GROUP_SHIFT 12
+#define MAX_GROUP GROUP_MASK
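+
+/*
+ * Resulting 16-bit TC priority built from the rte_flow attributes
+ * (see priv_flow_process() in tap_flow.c):
+ *
+ *    15    12 11                 0
+ *   +--------+--------------------+
+ *   | group  | priority + OFFSET  |
+ *   +--------+--------------------+
+ */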
+
+/**
+ * These indices are actually in reverse order: their priority is computed
+ * by subtracting their value from the lowest priority (PRIORITY_MASK).
+ * Thus the first one ends up with the lowest priority in the end
+ * (but the highest numeric value).
+ */
+enum implicit_rule_index {
+ TAP_REMOTE_TX,
+ TAP_REMOTE_BROADCASTV6,
+ TAP_REMOTE_BROADCAST,
+ TAP_REMOTE_ALLMULTI,
+ TAP_REMOTE_PROMISC,
+ TAP_REMOTE_LOCAL_MAC,
+ TAP_REMOTE_MAX_IDX,
+};
+
+int tap_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg);
+int tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error);
+
+int tap_flow_implicit_create(struct pmd_internals *pmd,
+ enum implicit_rule_index idx);
+int tap_flow_implicit_destroy(struct pmd_internals *pmd,
+ enum implicit_rule_index idx);
+int tap_flow_implicit_flush(struct pmd_internals *pmd,
+ struct rte_flow_error *error);
+
+#endif /* _TAP_FLOW_H_ */
diff --git a/drivers/net/tap/tap_netlink.c b/drivers/net/tap/tap_netlink.c
new file mode 100644
index 00000000..ee92e2e7
--- /dev/null
+++ b/drivers/net/tap/tap_netlink.c
@@ -0,0 +1,367 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/netlink.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <rte_malloc.h>
+#include <tap_netlink.h>
+#include <rte_random.h>
+
+/* Must be quite large to support dumping a huge list of QDISC or filters. */
+#define BUF_SIZE (32 * 1024) /* Size of the buffer to receive kernel messages */
+#define SNDBUF_SIZE 32768 /* Send buffer size for the netlink socket */
+#define RCVBUF_SIZE 32768 /* Receive buffer size for the netlink socket */
+
+struct nested_tail {
+ struct rtattr *tail;
+ struct nested_tail *prev;
+};
+
+/**
+ * Initialize a netlink socket for communicating with the kernel.
+ *
+ * @param nl_groups
+ * Set it to a netlink group value (e.g. RTMGRP_LINK) to receive messages for
+ * specific netlink multicast groups. Otherwise, no subscription will be made.
+ *
+ * @return
+ * netlink socket file descriptor on success, -1 otherwise.
+ */
+int
+nl_init(uint32_t nl_groups)
+{
+ int fd, sndbuf_size = SNDBUF_SIZE, rcvbuf_size = RCVBUF_SIZE;
+ struct sockaddr_nl local = {
+ .nl_family = AF_NETLINK,
+ .nl_groups = nl_groups,
+ };
+
+ fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD, "Unable to create a netlink socket\n");
+ return -1;
+ }
+ if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf_size, sizeof(int))) {
+ RTE_LOG(ERR, PMD, "Unable to set socket buffer send size\n");
+ return -1;
+ }
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf_size, sizeof(int))) {
+ RTE_LOG(ERR, PMD, "Unable to set socket buffer receive size\n");
+ return -1;
+ }
+ if (bind(fd, (struct sockaddr *)&local, sizeof(local)) < 0) {
+ RTE_LOG(ERR, PMD, "Unable to bind to the netlink socket\n");
+ return -1;
+ }
+ return fd;
+}
+
+/**
+ * Clean up a netlink socket once all communication with the kernel is finished.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ *
+ * @return
+ * 0 on success, -1 otherwise.
+ */
+int
+nl_final(int nlsk_fd)
+{
+ if (close(nlsk_fd)) {
+ RTE_LOG(ERR, PMD, "Failed to close netlink socket: %s (%d)\n",
+ strerror(errno), errno);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Send a message to the kernel on the netlink socket.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] nh
+ * The netlink message to send to the kernel.
+ *
+ * @return
+ * The number of bytes sent on success, -1 otherwise.
+ */
+int
+nl_send(int nlsk_fd, struct nlmsghdr *nh)
+{
+ /* man 7 netlink EXAMPLE */
+ struct sockaddr_nl sa = {
+ .nl_family = AF_NETLINK,
+ };
+ struct iovec iov = {
+ .iov_base = nh,
+ .iov_len = nh->nlmsg_len,
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
+ int send_bytes;
+
+ nh->nlmsg_pid = 0; /* communication with the kernel uses pid 0 */
+ nh->nlmsg_seq = (uint32_t)rte_rand();
+ send_bytes = sendmsg(nlsk_fd, &msg, 0);
+ if (send_bytes < 0) {
+ RTE_LOG(ERR, PMD, "Failed to send netlink message: %s (%d)\n",
+ strerror(errno), errno);
+ return -1;
+ }
+ return send_bytes;
+}
+
+/**
+ * Check that the kernel sends an appropriate ACK in response to an nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+nl_recv_ack(int nlsk_fd)
+{
+ return nl_recv(nlsk_fd, NULL, NULL);
+}
+
+/**
+ * Receive a message from the kernel on the netlink socket, following an
+ * nl_send().
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] cb
+ * The callback function to call for each netlink message received.
+ * @param[in, out] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+nl_recv(int nlsk_fd, int (*cb)(struct nlmsghdr *, void *arg), void *arg)
+{
+ /* man 7 netlink EXAMPLE */
+ struct sockaddr_nl sa;
+ char buf[BUF_SIZE];
+ struct iovec iov = {
+ .iov_base = buf,
+ .iov_len = sizeof(buf),
+ };
+ struct msghdr msg = {
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ .msg_iov = &iov,
+ /* One message at a time */
+ .msg_iovlen = 1,
+ };
+ int multipart = 0;
+ int ret = 0;
+
+ do {
+ struct nlmsghdr *nh;
+ int recv_bytes = 0;
+
+ recv_bytes = recvmsg(nlsk_fd, &msg, 0);
+ if (recv_bytes < 0)
+ return -1;
+ for (nh = (struct nlmsghdr *)buf;
+ NLMSG_OK(nh, (unsigned int)recv_bytes);
+ nh = NLMSG_NEXT(nh, recv_bytes)) {
+ if (nh->nlmsg_type == NLMSG_ERROR) {
+ struct nlmsgerr *err_data = NLMSG_DATA(nh);
+
+ if (err_data->error < 0) {
+ errno = -err_data->error;
+ return -1;
+ }
+ /* Ack message. */
+ return 0;
+ }
+ /* Multi-part msgs and their trailing DONE message. */
+ if (nh->nlmsg_flags & NLM_F_MULTI) {
+ if (nh->nlmsg_type == NLMSG_DONE)
+ return 0;
+ multipart = 1;
+ }
+ if (cb)
+ ret = cb(nh, arg);
+ }
+ } while (multipart);
+ return ret;
+}
+
+/**
+ * Append a netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message where the attribute will be appended.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data_len
+ * The length of the data to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data)
+{
+ /* see man 3 rtnetlink */
+ struct rtattr *rta;
+
+ rta = (struct rtattr *)NLMSG_TAIL(nh);
+ rta->rta_len = RTA_LENGTH(data_len);
+ rta->rta_type = type;
+ memcpy(RTA_DATA(rta), data, data_len);
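+ /* Grow the message length; both the header and the payload stay aligned. */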
+ nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
+}
+
+/**
+ * Append a uint8_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message where the attribute will be appended.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data)
+{
+ nlattr_add(nh, type, sizeof(uint8_t), &data);
+}
+
+/**
+ * Append a uint16_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message where the attribute will be appended.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data)
+{
+ nlattr_add(nh, type, sizeof(uint16_t), &data);
+}
+
+/**
+ * Append a uint32_t netlink attribute to a message.
+ *
+ * @param[in, out] nh
+ * The netlink message where the attribute will be appended.
+ * @param[in] type
+ * The type of attribute to append.
+ * @param[in] data
+ * The data to append.
+ */
+void
+nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data)
+{
+ nlattr_add(nh, type, sizeof(uint32_t), &data);
+}
+
+/**
+ * Start a nested netlink attribute.
+ * It must be followed later by a call to nlattr_nested_finish().
+ *
+ * @param[in, out] msg
+ * The netlink message where to edit the nested_tails metadata.
+ * @param[in] type
+ * The nested attribute type to append.
+ *
+ * @return
+ * -1 if adding a nested netlink attribute failed, 0 otherwise.
+ */
+int
+nlattr_nested_start(struct nlmsg *msg, uint16_t type)
+{
+ struct nested_tail *tail;
+
+ tail = rte_zmalloc(NULL, sizeof(struct nested_tail), 0);
+ if (!tail) {
+ RTE_LOG(ERR, PMD,
+ "Couldn't allocate memory for nested netlink"
+ " attribute\n");
+ return -1;
+ }
+
+ tail->tail = (struct rtattr *)NLMSG_TAIL(&msg->nh);
+
+ nlattr_add(&msg->nh, type, 0, NULL);
+
+ tail->prev = msg->nested_tails;
+
+ msg->nested_tails = tail;
+
+ return 0;
+}
+
+/**
+ * End a nested netlink attribute.
+ * It follows a call to nlattr_nested_start().
+ * In effect, it updates the nested attribute length to cover every byte
+ * from the start of the nested attribute up to this point.
+ *
+ * @param[in, out] msg
+ * The netlink message where to edit the nested_tails metadata.
+ */
+void
+nlattr_nested_finish(struct nlmsg *msg)
+{
+ struct nested_tail *tail = msg->nested_tails;
+
+ tail->tail->rta_len = (char *)NLMSG_TAIL(&msg->nh) - (char *)tail->tail;
+
+ if (tail->prev)
+ msg->nested_tails = tail->prev;
+
+ rte_free(tail);
+}
diff --git a/drivers/net/tap/tap_netlink.h b/drivers/net/tap/tap_netlink.h
new file mode 100644
index 00000000..98e13902
--- /dev/null
+++ b/drivers/net/tap/tap_netlink.h
@@ -0,0 +1,69 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TAP_NETLINK_H_
+#define _TAP_NETLINK_H_
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
+#include <stdio.h>
+
+#include <rte_log.h>
+
+#define NLMSG_BUF 512
+
+struct nlmsg {
+ struct nlmsghdr nh;
+ struct tcmsg t;
+ char buf[NLMSG_BUF];
+ struct nested_tail *nested_tails;
+};
+
+#define NLMSG_TAIL(nlh) (void *)((char *)(nlh) + NLMSG_ALIGN((nlh)->nlmsg_len))
+
+int nl_init(uint32_t nl_groups);
+int nl_final(int nlsk_fd);
+int nl_send(int nlsk_fd, struct nlmsghdr *nh);
+int nl_recv(int nlsk_fd, int (*callback)(struct nlmsghdr *, void *), void *arg);
+int nl_recv_ack(int nlsk_fd);
+void nlattr_add(struct nlmsghdr *nh, unsigned short type,
+ unsigned int data_len, const void *data);
+void nlattr_add8(struct nlmsghdr *nh, unsigned short type, uint8_t data);
+void nlattr_add16(struct nlmsghdr *nh, unsigned short type, uint16_t data);
+void nlattr_add32(struct nlmsghdr *nh, unsigned short type, uint32_t data);
+int nlattr_nested_start(struct nlmsg *msg, uint16_t type);
+void nlattr_nested_finish(struct nlmsg *msg);
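+
+/*
+ * Usage sketch (illustrative values): nested attributes are built by
+ * bracketing regular nlattr_add*() calls between a start and a finish call.
+ *
+ *   nlattr_add(&msg->nh, TCA_KIND, sizeof("flower"), "flower");
+ *   nlattr_nested_start(msg, TCA_OPTIONS);
+ *   nlattr_add16(&msg->nh, TCA_FLOWER_KEY_ETH_TYPE, htons(ETH_P_IP));
+ *   nlattr_nested_finish(msg);  <- patches the TCA_OPTIONS length
+ */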
+
+#endif /* _TAP_NETLINK_H_ */
diff --git a/drivers/net/tap/tap_tcmsgs.c b/drivers/net/tap/tap_tcmsgs.c
new file mode 100644
index 00000000..d74ac805
--- /dev/null
+++ b/drivers/net/tap/tap_tcmsgs.c
@@ -0,0 +1,323 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of 6WIND S.A. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <inttypes.h>
+#include <linux/netlink.h>
+#include <net/if.h>
+#include <string.h>
+
+#include <rte_log.h>
+#include <tap_tcmsgs.h>
+
+struct qdisc {
+ uint32_t handle;
+ uint32_t parent;
+};
+
+struct list_args {
+ int nlsk_fd;
+ uint16_t ifindex;
+ void *custom_arg;
+};
+
+struct qdisc_custom_arg {
+ uint32_t handle;
+ uint32_t parent;
+ uint8_t exists;
+};
+
+/**
+ * Initialize a netlink message with a TC header.
+ *
+ * @param[in, out] msg
+ * The netlink message to fill.
+ * @param[in] ifindex
+ * The netdevice ifindex where the rule will be applied.
+ * @param[in] type
+ * The type of TC message to create (RTM_NEWTFILTER, RTM_NEWQDISC, etc.).
+ * @param[in] flags
+ * Overrides the default netlink flags for this msg with those specified.
+ */
+void
+tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type, uint16_t flags)
+{
+ struct nlmsghdr *n = &msg->nh;
+
+ n->nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ n->nlmsg_type = type;
+ if (flags)
+ n->nlmsg_flags = flags;
+ else
+ n->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ msg->t.tcm_family = AF_UNSPEC;
+ msg->t.tcm_ifindex = ifindex;
+}
+
+/**
+ * Delete a specific QDISC identified by its ifindex, handle and parent.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex on which the deletion will happen.
+ * @param[in] qinfo
+ * Additional info to identify the QDISC (handle and parent).
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_del(int nlsk_fd, uint16_t ifindex, struct qdisc *qinfo)
+{
+ struct nlmsg msg;
+ int fd = 0;
+
+ tc_init_msg(&msg, ifindex, RTM_DELQDISC, 0);
+ msg.t.tcm_handle = qinfo->handle;
+ msg.t.tcm_parent = qinfo->parent;
+ /* if no netlink socket is provided, create one */
+ if (!nlsk_fd) {
+ fd = nl_init(0);
+ if (fd < 0) {
+ RTE_LOG(ERR, PMD,
+ "Could not delete QDISC: null netlink socket\n");
+ return -1;
+ }
+ } else {
+ fd = nlsk_fd;
+ }
+ if (nl_send(fd, &msg.nh) < 0)
+ goto error;
+ if (nl_recv_ack(fd) < 0)
+ goto error;
+ if (!nlsk_fd)
+ return nl_final(fd);
+ return 0;
+error:
+ if (!nlsk_fd)
+ nl_final(fd);
+ return -1;
+}
+
+/**
+ * Add the multiqueue QDISC with MULTIQ_MAJOR_HANDLE handle.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the multiqueue QDISC.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_add_multiq(int nlsk_fd, uint16_t ifindex)
+{
+ struct tc_multiq_qopt opt = { 0 };
+ struct nlmsg msg;
+
+ tc_init_msg(&msg, ifindex, RTM_NEWQDISC,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg.t.tcm_handle = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
+ msg.t.tcm_parent = TC_H_ROOT;
+ nlattr_add(&msg.nh, TCA_KIND, sizeof("multiq"), "multiq");
+ nlattr_add(&msg.nh, TCA_OPTIONS, sizeof(opt), &opt);
+ if (nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (nl_recv_ack(nlsk_fd) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Add the ingress QDISC with default ffff: handle.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where the QDISC will be added.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_add_ingress(int nlsk_fd, uint16_t ifindex)
+{
+ struct nlmsg msg;
+
+ tc_init_msg(&msg, ifindex, RTM_NEWQDISC,
+ NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
+ msg.t.tcm_handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ msg.t.tcm_parent = TC_H_INGRESS;
+ nlattr_add(&msg.nh, TCA_KIND, sizeof("ingress"), "ingress");
+ if (nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (nl_recv_ack(nlsk_fd) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Callback function to delete a QDISC.
+ *
+ * @param[in] nh
+ * The netlink message to parse, received from the kernel.
+ * @param[in] arg
+ * Custom arguments for the callback.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_del_cb(struct nlmsghdr *nh, void *arg)
+{
+ struct tcmsg *t = NLMSG_DATA(nh);
+ struct list_args *args = arg;
+
+ struct qdisc qinfo = {
+ .handle = t->tcm_handle,
+ .parent = t->tcm_parent,
+ };
+
+ /* filter out other ifaces' qdiscs */
+ if (args->ifindex != (unsigned int)t->tcm_ifindex)
+ return 0;
+ /*
+ * Use another nlsk_fd (0) to avoid tampering with the current list
+ * iteration.
+ */
+ return qdisc_del(0, args->ifindex, &qinfo);
+}
+
+/**
+ * Iterate over all QDISCs and call the callback() function for each.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to find QDISCs.
+ * @param[in] callback
+ * The function to call for each QDISC.
+ * @param[in, out] arg
+ * The arguments to provide the callback function with.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+static int
+qdisc_iterate(int nlsk_fd, uint16_t ifindex,
+ int (*callback)(struct nlmsghdr *, void *), void *arg)
+{
+ struct nlmsg msg;
+ struct list_args args = {
+ .nlsk_fd = nlsk_fd,
+ .ifindex = ifindex,
+ .custom_arg = arg,
+ };
+
+ tc_init_msg(&msg, ifindex, RTM_GETQDISC, NLM_F_REQUEST | NLM_F_DUMP);
+ if (nl_send(nlsk_fd, &msg.nh) < 0)
+ return -1;
+ if (nl_recv(nlsk_fd, callback, &args) < 0)
+ return -1;
+ return 0;
+}
+
+/**
+ * Delete all QDISCs for a given netdevice.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to find QDISCs.
+ *
+ * @return
+ * 0 on success, -1 otherwise with errno set.
+ */
+int
+qdisc_flush(int nlsk_fd, uint16_t ifindex)
+{
+ return qdisc_iterate(nlsk_fd, ifindex, qdisc_del_cb, NULL);
+}
+
+/**
+ * Create the multiqueue QDISC, only if it does not exist already.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the multiqueue QDISC.
+ *
+ * @return
+ * 0 if the qdisc already exists or has been successfully added, -1 otherwise.
+ */
+int
+qdisc_create_multiq(int nlsk_fd, uint16_t ifindex)
+{
+ int err = 0;
+
+ err = qdisc_add_multiq(nlsk_fd, ifindex);
+ if (err < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, PMD, "Could not add multiq qdisc (%d): %s\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * Create the ingress QDISC, only if it does not exist already.
+ *
+ * @param[in] nlsk_fd
+ * The netlink socket file descriptor used for communication.
+ * @param[in] ifindex
+ * The netdevice ifindex where to add the ingress QDISC.
+ *
+ * @return
+ * 0 if the qdisc already exists or has been successfully added, -1 otherwise.
+ */
+int
+qdisc_create_ingress(int nlsk_fd, uint16_t ifindex)
+{
+ int err = 0;
+
+ err = qdisc_add_ingress(nlsk_fd, ifindex);
+ if (err < 0 && errno != EEXIST) {
+ RTE_LOG(ERR, PMD, "Could not add ingress qdisc (%d): %s\n",
+ errno, strerror(errno));
+ return -1;
+ }
+ return 0;
+}
diff --git a/drivers/net/null/rte_eth_null.h b/drivers/net/tap/tap_tcmsgs.h
index abada8c2..78959577 100644
--- a/drivers/net/null/rte_eth_null.h
+++ b/drivers/net/tap/tap_tcmsgs.h
@@ -1,8 +1,8 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Corporation. All rights reserved.
- * All rights reserved.
+ * Copyright 2017 6WIND S.A.
+ * Copyright 2017 Mellanox.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -14,7 +14,7 @@
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
+ * * Neither the name of 6WIND S.A. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -31,10 +31,31 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef RTE_ETH_NULL_H_
-#define RTE_ETH_NULL_H_
+#ifndef _TAP_TCMSGS_H_
+#define _TAP_TCMSGS_H_
-int eth_dev_null_create(const char *name, const unsigned numa_node,
- unsigned packet_size, unsigned packet_copy);
+#include <linux/if_ether.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/pkt_cls.h>
+#include <linux/tc_act/tc_mirred.h>
+#include <linux/tc_act/tc_gact.h>
+#include <linux/tc_act/tc_skbedit.h>
+#include <inttypes.h>
-#endif /* RTE_ETH_NULL_H_ */
+#include <rte_ether.h>
+#include <tap_netlink.h>
+
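+/* TC handle "1:0" (major 1, minor 0), used for the root multiq qdisc. */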
+#define MULTIQ_MAJOR_HANDLE (1 << 16)
+
+void tc_init_msg(struct nlmsg *msg, uint16_t ifindex, uint16_t type,
+ uint16_t flags);
+int qdisc_list(int nlsk_fd, uint16_t ifindex);
+int qdisc_flush(int nlsk_fd, uint16_t ifindex);
+int qdisc_create_ingress(int nlsk_fd, uint16_t ifindex);
+int qdisc_create_multiq(int nlsk_fd, uint16_t ifindex);
+int qdisc_add_ingress(int nlsk_fd, uint16_t ifindex);
+int qdisc_add_multiq(int nlsk_fd, uint16_t ifindex);
+int filter_list_ingress(int nlsk_fd, uint16_t ifindex);
+
+#endif /* _TAP_TCMSGS_H_ */
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index bcab5f93..706250b8 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -65,8 +65,4 @@ CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays
endif
CFLAGS_nicvf_rxtx.o += -Ofast
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_mempool lib/librte_mbuf
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index 9e028a3a..49a2646d 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -37,7 +37,7 @@
#include "nicvf_bsvf.h"
#include "nicvf_plat.h"
-static SIMPLEQ_HEAD(, svf_entry) head = SIMPLEQ_HEAD_INITIALIZER(head);
+static STAILQ_HEAD(, svf_entry) head = STAILQ_HEAD_INITIALIZER(head);
void
nicvf_bsvf_push(struct svf_entry *entry)
@@ -45,7 +45,7 @@ nicvf_bsvf_push(struct svf_entry *entry)
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_INSERT_TAIL(&head, entry, next);
+ STAILQ_INSERT_TAIL(&head, entry, next);
}
struct svf_entry *
@@ -53,14 +53,14 @@ nicvf_bsvf_pop(void)
{
struct svf_entry *entry;
- assert(!SIMPLEQ_EMPTY(&head));
+ assert(!STAILQ_EMPTY(&head));
- entry = SIMPLEQ_FIRST(&head);
+ entry = STAILQ_FIRST(&head);
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_REMOVE_HEAD(&head, next);
+ STAILQ_REMOVE_HEAD(&head, next);
return entry;
}
@@ -68,5 +68,5 @@ nicvf_bsvf_pop(void)
int
nicvf_bsvf_empty(void)
{
- return SIMPLEQ_EMPTY(&head);
+ return STAILQ_EMPTY(&head);
}
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index 5d5a25e2..fb9b2484 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -41,7 +41,7 @@ struct nicvf;
* The base queue structure to hold secondary qsets.
*/
struct svf_entry {
- SIMPLEQ_ENTRY(svf_entry) next; /**< Next element's pointer */
+ STAILQ_ENTRY(svf_entry) next; /**< Next element's pointer */
struct nicvf *vf; /**< Holder of a secondary qset */
};
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 00dd2feb..79f83c8d 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -36,6 +36,8 @@
#include <stdint.h>
#include <stdbool.h>
+#include "nicvf_plat.h"
+
/* Virtual function register offsets */
#define NIC_VF_CFG (0x000020)
@@ -213,10 +215,6 @@
typedef uint64_t nicvf_phys_addr_t;
-#ifndef __BYTE_ORDER__
-#error __BYTE_ORDER__ not defined
-#endif
-
/* vNIC HW Enumerations */
enum nic_send_ld_type_e {
@@ -559,7 +557,7 @@ enum nic_stat_vnic_tx_e {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4;
uint64_t stdn_fault:1;
uint64_t rsvd0:1;
@@ -604,7 +602,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t pkt_len:16;
uint64_t l2_ptr:8;
uint64_t l3_ptr:8;
@@ -629,7 +627,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rss_tag:32;
uint64_t vlan_tci:16;
uint64_t vlan_ptr:8;
@@ -646,7 +644,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb3_sz;
uint16_t rb2_sz;
uint16_t rb1_sz;
@@ -663,7 +661,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb7_sz;
uint16_t rb6_sz;
uint16_t rb5_sz;
@@ -680,7 +678,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb11_sz;
uint16_t rb10_sz;
uint16_t rb9_sz;
@@ -697,7 +695,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t vlan_found:1;
uint64_t vlan_stripped:1;
uint64_t vlan2_found:1;
@@ -742,7 +740,7 @@ struct cqe_rx_t {
};
struct cqe_rx_tcp_err_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:60;
@@ -764,7 +762,7 @@ struct cqe_rx_tcp_err_t {
};
struct cqe_rx_tcp_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:52;
uint64_t cq_tcp_status:8;
@@ -786,7 +784,7 @@ struct cqe_rx_tcp_t {
};
struct cqe_send_t {
-#if defined(__BIG_ENDIAN_BITFIELD)
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:4;
uint64_t sqe_ptr:16;
@@ -798,7 +796,7 @@ struct cqe_send_t {
uint64_t send_status:8;
uint64_t ptp_timestamp:64; /* W1 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
+#elif NICVF_BYTE_ORDER == NICVF_LITTLE_ENDIAN
uint64_t send_status:8;
uint64_t rsvd3:8;
uint64_t sq_idx:3;
@@ -814,7 +812,7 @@ struct cqe_send_t {
};
struct cq_entry_type_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4;
uint64_t __pad:60;
#else
@@ -835,7 +833,7 @@ union cq_entry_t {
NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
struct rbdr_entry_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
union {
struct {
uint64_t rsvd0:15;
@@ -860,7 +858,7 @@ NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t tcp_pkt_cnt:12;
uint64_t rsvd1:4;
uint64_t align_hdr_bytes:4;
@@ -899,7 +897,7 @@ struct rx_hdr_t {
};
struct sq_crc_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rsvd1:32;
uint64_t crc_ival:32;
uint64_t subdesc_type:4;
@@ -921,7 +919,7 @@ struct sq_crc_subdesc {
};
struct sq_gather_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t ld_type:2;
uint64_t rsvd0:42;
@@ -942,7 +940,7 @@ struct sq_gather_subdesc {
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t rsvd0:46;
uint64_t len:14;
@@ -958,7 +956,7 @@ struct sq_imm_subdesc {
};
struct sq_mem_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t mem_alg:4;
uint64_t mem_dsz:2;
@@ -982,7 +980,7 @@ struct sq_mem_subdesc {
};
struct sq_hdr_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4;
uint64_t tso:1;
uint64_t post_cqe:1; /* Post CQE on no error also */
@@ -1045,7 +1043,7 @@ NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
/* Queue config register formats */
struct rq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_2_63:62;
uint64_t ena:1;
uint64_t reserved_0:1;
@@ -1059,7 +1057,7 @@ struct rq_cfg { union { struct {
}; };
struct cq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_43_63:21;
uint64_t ena:1;
uint64_t reset:1;
@@ -1085,7 +1083,7 @@ struct cq_cfg { union { struct {
}; };
struct sq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_20_63:44;
uint64_t ena:1;
uint64_t reserved_18_18:1;
@@ -1111,7 +1109,7 @@ struct sq_cfg { union { struct {
}; };
struct rbdr_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_45_63:19;
uint64_t ena:1;
uint64_t reset:1;
@@ -1139,7 +1137,7 @@ struct rbdr_cfg { union { struct {
}; };
struct pf_qs_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_32_63:32;
uint64_t ena:1;
uint64_t reserved_27_30:4;
@@ -1169,7 +1167,7 @@ struct pf_qs_cfg { union { struct {
}; };
struct pf_rq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved1:1;
uint64_t reserved0:34;
uint64_t strip_pre_l2:1;
@@ -1197,7 +1195,7 @@ struct pf_rq_cfg { union { struct {
}; };
struct pf_rq_drop_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rbdr_red:1;
uint64_t cq_red:1;
uint64_t reserved3:14;
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index 3b7b8a51..a072f19d 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -62,9 +62,6 @@ static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
[NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
[NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
- [NIC_MBOX_MSG_RES_BIT] = "NIC_MBOX_MSG_RES_BIT",
- [NIC_MBOX_MSG_RSS_SIZE_RES_BIT] = "NIC_MBOX_MSG_RSS_SIZE",
- [NIC_MBOX_MSG_ALLOC_SQS_RES_BIT] = "NIC_MBOX_MSG_ALLOC_SQS",
};
static inline const char * __attribute__((unused))
@@ -176,7 +173,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_NACK:
nic->pf_nacked = true;
break;
- case NIC_MBOX_MSG_RSS_SIZE_RES_BIT:
+ case NIC_MBOX_MSG_RSS_SIZE:
nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
nic->pf_acked = true;
break;
@@ -186,7 +183,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
nic->speed = mbx.link_status.speed;
nic->pf_acked = true;
break;
- case NIC_MBOX_MSG_ALLOC_SQS_RES_BIT:
+ case NIC_MBOX_MSG_ALLOC_SQS:
assert_primary(nic);
if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
nicvf_log_error("Received %" PRIu8 "/%" PRIu8
@@ -331,7 +328,7 @@ nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
{
struct nic_mbx mbx = { .msg = { 0 } };
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
qs_cfg->be = 1;
#endif
/* Send a mailbox msg to PF to config Qset */
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 084f3a76..8675fe8f 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -68,16 +68,10 @@
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
-#define NIC_MBOX_MSG_CFG_DONE 0x7E /* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN 0x7F /* VF is being shutdown */
-#define NIC_MBOX_MSG_RES_BIT 0x80 /* Reset bit from PF */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */
-#define NIC_MBOX_MSG_RSS_SIZE_RES_BIT \
- (NIC_MBOX_MSG_RSS_SIZE | NIC_MBOX_MSG_RES_BIT)
-#define NIC_MBOX_MSG_ALLOC_SQS_RES_BIT \
- (NIC_MBOX_MSG_ALLOC_SQS | NIC_MBOX_MSG_RES_BIT)
-
/* Get vNIC VF configuration */
struct nic_cfg_msg {
uint8_t msg;
@@ -157,6 +151,7 @@ struct rss_cfg_msg {
/* Physical interface link status */
struct bgx_link_status {
uint8_t msg;
+ uint8_t mac_type;
uint8_t link_up;
uint8_t duplex;
uint32_t speed;
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index 83c1844d..36da1200 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -65,35 +65,23 @@
#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
+#define NICVF_BYTE_ORDER RTE_BYTE_ORDER
+#define NICVF_BIG_ENDIAN RTE_BIG_ENDIAN
+#define NICVF_LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+
/* Constants */
#include <rte_ether.h>
#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+#include <rte_io.h>
+#define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr))
+#define nicvf_addr_read(addr) rte_read64_relaxed((void *)(addr))
+
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define nicvf_prefetch_store_keep(_ptr) ({\
asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
-static inline void __attribute__((always_inline))
-nicvf_addr_write(uintptr_t addr, uint64_t val)
-{
- asm volatile(
- "str %x[val], [%x[addr]]"
- :
- : [val] "r" (val), [addr] "r" (addr));
-}
-
-static inline uint64_t __attribute__((always_inline))
-nicvf_addr_read(uintptr_t addr)
-{
- uint64_t val;
-
- asm volatile(
- "ldr %x[val], [%x[addr]]"
- : [val] "=r" (val)
- : [addr] "r" (addr));
- return val;
-}
#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
asm volatile( \
@@ -106,18 +94,6 @@ nicvf_addr_read(uintptr_t addr)
#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
-static inline void __attribute__((always_inline))
-nicvf_addr_write(uintptr_t addr, uint64_t val)
-{
- *(volatile uint64_t *)addr = val;
-}
-
-static inline uint64_t __attribute__((always_inline))
-nicvf_addr_read(uintptr_t addr)
-{
- return *(volatile uint64_t *)addr;
-}
-
#define NICVF_LOAD_PAIR(reg1, reg2, addr) \
do { \
reg1 = nicvf_addr_read((uintptr_t)addr); \
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 466e49ce..e4910c9b 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -41,7 +41,6 @@
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
-#include <sys/timerfd.h>
#include <rte_alarm.h>
#include <rte_atomic.h>
@@ -54,6 +53,7 @@
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
@@ -145,16 +145,29 @@ nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
* Return 0 means link status changed, -1 means not changed
*/
static int
-nicvf_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete __rte_unused)
+nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
struct rte_eth_link link;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ int i;
PMD_INIT_FUNC_TRACE();
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ if (wait_to_complete) {
+ /* rte_eth_link_get() might need to wait up to 9 seconds */
+ for (i = 0; i < MAX_CHECK_TIME; i++) {
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ if (link.link_status)
+ break;
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+ } else {
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ }
return nicvf_atomic_write_link_status(dev, &link);
}
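With wait_to_complete honoured above, the blocking link query on this PMD can take up to ~9 seconds while the link trains. A short usage sketch from the application side (standard ethdev calls; port_id and the print format are placeholders, not part of this patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Query link state after rte_eth_dev_start(); rte_eth_link_get() maps to
 * the PMD's link_update with wait_to_complete = 1 and may poll for up to
 * ~9 s here, while rte_eth_link_get_nowait() returns immediately.
 */
static void show_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_link_get(port_id, &link);
	if (link.link_status)
		printf("port %u up, %u Mbps, %s duplex\n", port_id,
		       link.link_speed,
		       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
		       "full" : "half");
	else
		printf("port %u down\n", port_id);
}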
@@ -245,7 +258,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
@@ -258,7 +271,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
@@ -277,7 +290,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(snic, &rx_qstats,
@@ -290,7 +303,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nicvf_tx_range(dev, snic, &tx_start, &tx_end);
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(snic, &tx_qstats,
@@ -1219,6 +1232,23 @@ nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}
+static inline void
+nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def;
+
+ RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer.value = *(uint64_t *)p;
+}
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
@@ -1311,6 +1341,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
else
rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+ nicvf_rxq_mbuf_setup(rxq);
/* Alloc completion queue */
if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
@@ -1335,9 +1366,12 @@ static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
dev_info->max_rx_queues =
@@ -1345,7 +1379,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues =
(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_mac_addrs = 1;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa =
@@ -1407,7 +1441,7 @@ static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
int ret;
- uint16_t qidx;
+ uint16_t qidx, data_off;
uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
uint64_t mbuf_phys_off = 0;
struct nicvf_rxq *rxq;
@@ -1448,10 +1482,18 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->vf_id, qidx, rxq->pool->name);
return -ENOMEM;
}
- rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
- rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+ data_off = nicvf_mbuff_meta_length(mbuf);
+ data_off += RTE_PKTMBUF_HEADROOM;
rte_pktmbuf_free(mbuf);
+ if (data_off % RTE_CACHE_LINE_SIZE) {
+ PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
+ rxq->pool->name, data_off,
+ data_off % RTE_CACHE_LINE_SIZE);
+ return -EINVAL;
+ }
+ rxq->mbuf_phys_off -= data_off;
+
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
if (mbuf_phys_off != rxq->mbuf_phys_off) {
@@ -1975,7 +2017,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
}
}
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->device_id = pci_dev->id.device_id;
@@ -2108,16 +2150,25 @@ static const struct rte_pci_id pci_id_nicvf_map[] = {
},
};
-static struct eth_driver rte_nicvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_nicvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = nicvf_eth_dev_init,
- .dev_private_size = sizeof(struct nicvf),
+static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
+ nicvf_eth_dev_init);
+}
+
+static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nicvf_pmd = {
+ .id_table = pci_id_nicvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = nicvf_eth_pci_probe,
+ .remove = nicvf_eth_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index fc43b747..6cae8341 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -430,9 +430,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union cq_entry_t *desc = rxq->desc;
const uint64_t cqe_mask = rxq->qlen_mask;
uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
uint32_t cqe_head = rxq->head & cqe_mask;
int32_t available_space = rxq->available_space;
- uint8_t port_id = rxq->port_id;
const uint8_t rbptr_offset = rxq->rbptr_offset;
to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
@@ -448,17 +448,12 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-
pkt->ol_flags = 0;
- pkt->port = port_id;
pkt->data_len = cqe_rx_w3.rb0_sz;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = 1;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
-
+ nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
- rte_mbuf_refcnt_set(pkt, 1);
rx_pkts[i] = pkt;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(pkt);
@@ -469,11 +464,10 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += to_process;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
- rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
@@ -481,8 +475,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
static inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
- uint64_t mbuf_phys_off, uint8_t port_id,
- struct rte_mbuf **rx_pkt, uint8_t rbptr_offset)
+ uint64_t mbuf_phys_off,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
+ uint64_t mbuf_init)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -501,12 +496,10 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
pkt->ol_flags = 0;
- pkt->port = port_id;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = nb_segs;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
- rte_mbuf_refcnt_set(pkt, 1);
+ nicvf_mbuff_init_mseg_update(
+ pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
@@ -518,9 +511,7 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
prev->next = seg;
seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
- seg->port = port_id;
- seg->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(seg, 1);
+ nicvf_mbuff_init_update(seg, mbuf_init, 0);
prev = seg;
}
@@ -541,7 +532,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
uint32_t i, to_process, cqe_head, buffers_consumed = 0;
int32_t available_space = rxq->available_space;
uint16_t nb_segs;
- const uint8_t port_id = rxq->port_id;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
const uint8_t rbptr_offset = rxq->rbptr_offset;
cqe_head = rxq->head & cqe_mask;
@@ -552,7 +543,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- port_id, rx_pkts + i, rbptr_offset);
+ rx_pkts + i, rbptr_offset, mbuf_init);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -563,11 +554,10 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += buffers_consumed;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -=
- nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 9dad8a5a..3631ff22 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -84,6 +84,33 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
}
#endif
+static inline void
+nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
+static inline void
+nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad, uint16_t nb_segs)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ init.fields.nb_segs = nb_segs;
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
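nicvf_mbuff_init_update() above works because data_off, refcnt, nb_segs and port sit contiguously under the mbuf's rearm_data marker, so all four fields can be rearmed with one 64-bit store built from a per-queue template. A self-contained sketch of the same trick (plain C with a hypothetical toy layout standing in for rte_mbuf; little-endian assumed, as in the driver's non-big-endian branch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the rearm block of an mbuf. */
struct toy_rearm {
	uint16_t data_off;
	uint16_t refcnt;
	uint16_t nb_segs;
	uint16_t port;
};

union toy_initializer {
	struct toy_rearm fields;
	uint64_t value;
};

int main(void)
{
	/* Built once at queue setup time (cf. nicvf_rxq_mbuf_setup()). */
	union toy_initializer tmpl = {
		.fields = { .data_off = 128, .refcnt = 1,
			    .nb_segs = 1, .port = 3 },
	};
	/* Per packet: fold in the alignment pad and store the whole word.
	 * On little-endian, adding to .value only moves data_off as long
	 * as the pad cannot overflow 16 bits.
	 */
	uint16_t align_pad = 6;
	uint64_t v = tmpl.value + align_pad;
	struct toy_rearm pkt;

	memcpy(&pkt, &v, sizeof(pkt));
	printf("data_off=%u refcnt=%u nb_segs=%u port=%u\n",
	       pkt.data_off, pkt.refcnt, pkt.nb_segs, pkt.port);
	return 0;
}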
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index c900e121..34c41b79 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -43,8 +43,8 @@
#include <rte_memory.h>
struct nicvf_rbdr {
- uint64_t rbdr_status;
- uint64_t rbdr_door;
+ uintptr_t rbdr_status;
+ uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
nicvf_phys_addr_t phys;
uint32_t buffsz;
@@ -58,8 +58,8 @@ struct nicvf_txq {
union sq_entry_t *desc;
nicvf_phys_addr_t phys;
struct rte_mbuf **txbuffs;
- uint64_t sq_head;
- uint64_t sq_door;
+ uintptr_t sq_head;
+ uintptr_t sq_door;
struct rte_mempool *pool;
struct nicvf *nic;
void (*pool_free)(struct nicvf_txq *sq);
@@ -72,10 +72,21 @@ struct nicvf_txq {
uint16_t tx_free_thresh;
} __rte_cache_aligned;
+union mbuf_initializer {
+ struct {
+ uint16_t data_off;
+ uint16_t refcnt;
+ uint16_t nb_segs;
+ uint16_t port;
+ } fields;
+ uint64_t value;
+};
+
struct nicvf_rxq {
uint64_t mbuf_phys_off;
- uint64_t cq_status;
- uint64_t cq_door;
+ uintptr_t cq_status;
+ uintptr_t cq_door;
+ union mbuf_initializer mbuf_initializer;
nicvf_phys_addr_t phys;
union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;
diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile
index 050c5aa5..3ba8ad64 100644
--- a/drivers/net/vhost/Makefile
+++ b/drivers/net/vhost/Makefile
@@ -55,12 +55,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c
#
SYMLINK-y-include += rte_eth_vhost.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_kvargs
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_vhost
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 328dde08..257bf6d6 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -33,27 +33,26 @@
#include <unistd.h>
#include <pthread.h>
#include <stdbool.h>
-#ifdef RTE_LIBRTE_VHOST_NUMA
-#include <numaif.h>
-#endif
#include <rte_mbuf.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
#include <rte_spinlock.h>
#include "rte_eth_vhost.h"
+enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
+
#define ETH_VHOST_IFACE_ARG "iface"
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
-
-static const char *drivername = "VHOST PMD";
+#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
ETH_VHOST_IFACE_ARG,
@@ -112,9 +111,11 @@ struct vhost_queue {
};
struct pmd_internal {
+ rte_atomic32_t dev_attached;
char *dev_name;
char *iface_name;
uint16_t max_queues;
+ rte_atomic32_t started;
};
struct internal_list {
@@ -128,9 +129,6 @@ static struct internal_list_head internal_list =
static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
-static rte_atomic16_t nb_started_ports;
-static pthread_t session_th;
-
static struct rte_eth_link pmd_link = {
.link_speed = 10000,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -391,6 +389,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
struct vhost_queue *r = q;
uint16_t i, nb_rx = 0;
+ uint16_t nb_receive = nb_bufs;
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
return 0;
@@ -401,8 +400,20 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto out;
/* Dequeue packets from guest TX queue */
- nb_rx = rte_vhost_dequeue_burst(r->vid,
- r->virtqueue_id, r->mb_pool, bufs, nb_bufs);
+ while (nb_receive) {
+ uint16_t nb_pkts;
+ uint16_t num = (uint16_t)RTE_MIN(nb_receive,
+ VHOST_MAX_PKT_BURST);
+
+ nb_pkts = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id,
+ r->mb_pool, &bufs[nb_rx],
+ num);
+
+ nb_rx += nb_pkts;
+ nb_receive -= nb_pkts;
+ if (nb_pkts < num)
+ break;
+ }
r->stats.pkts += nb_rx;
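The burst functions now call rte_vhost_dequeue_burst()/rte_vhost_enqueue_burst() in chunks of at most VHOST_MAX_PKT_BURST (32) and stop as soon as the backend returns fewer packets than requested. The same drain-in-bounded-bursts pattern in isolation (the fetch callback is a placeholder for the vhost call, not part of this patch):

#include <stdint.h>

#define MAX_BURST 32

/* Drain up to 'want' items through a backend that only accepts bounded
 * bursts; stops early when the backend runs dry.
 */
static uint16_t
drain_bounded(uint16_t (*fetch)(void **items, uint16_t n),
	      void **items, uint16_t want)
{
	uint16_t done = 0;

	while (want) {
		uint16_t req = want < MAX_BURST ? want : MAX_BURST;
		uint16_t got = fetch(&items[done], req);

		done += got;
		want -= got;
		if (got < req)
			break;
	}
	return done;
}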
@@ -424,6 +435,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
struct vhost_queue *r = q;
uint16_t i, nb_tx = 0;
+ uint16_t nb_send = nb_bufs;
if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
return 0;
@@ -434,8 +446,19 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
goto out;
/* Enqueue packets to guest RX queue */
- nb_tx = rte_vhost_enqueue_burst(r->vid,
- r->virtqueue_id, bufs, nb_bufs);
+ while (nb_send) {
+ uint16_t nb_pkts;
+ uint16_t num = (uint16_t)RTE_MIN(nb_send,
+ VHOST_MAX_PKT_BURST);
+
+ nb_pkts = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id,
+ &bufs[nb_tx], num);
+
+ nb_tx += nb_pkts;
+ nb_send -= nb_pkts;
+ if (nb_pkts < num)
+ break;
+ }
r->stats.pkts += nb_tx;
r->stats.missed_pkts += nb_bufs - nb_tx;
@@ -494,6 +517,38 @@ find_internal_resource(char *ifname)
return list;
}
+static void
+update_queuing_status(struct rte_eth_dev *dev)
+{
+ struct pmd_internal *internal = dev->data->dev_private;
+ struct vhost_queue *vq;
+ unsigned int i;
+ int allow_queuing = 1;
+
+ if (rte_atomic32_read(&internal->started) == 0 ||
+ rte_atomic32_read(&internal->dev_attached) == 0)
+ allow_queuing = 0;
+
+ /* Wait until rx/tx_pkt_burst stops accessing vhost device */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ vq = dev->data->rx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ vq = dev->data->tx_queues[i];
+ if (vq == NULL)
+ continue;
+ rte_atomic32_set(&vq->allow_queuing, allow_queuing);
+ while (rte_atomic32_read(&vq->while_queuing))
+ rte_pause();
+ }
+}
+
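update_queuing_status() first clears allow_queuing and then spins until while_queuing drops, so no datapath thread still holds the vhost device once the port is stopped or the connection is torn down. A minimal sketch of the datapath side of that handshake, using C11 atomics in place of rte_atomic32 (the real rx/tx burst paths follow the same check/mark/re-check pattern):

#include <stdatomic.h>
#include <stdbool.h>

struct queue_gate {
	atomic_int allow_queuing;   /* toggled by the control plane */
	atomic_int while_queuing;   /* set while a burst is in flight */
};

/* Returns false when queuing is currently disallowed. */
static bool gate_enter(struct queue_gate *g)
{
	if (atomic_load(&g->allow_queuing) == 0)
		return false;
	atomic_store(&g->while_queuing, 1);
	/* Re-check: allow_queuing may have been cleared between the first
	 * load and marking ourselves busy. */
	if (atomic_load(&g->allow_queuing) == 0) {
		atomic_store(&g->while_queuing, 0);
		return false;
	}
	return true;
}

static void gate_leave(struct queue_gate *g)
{
	atomic_store(&g->while_queuing, 0);
}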
static int
new_device(int vid)
{
@@ -540,23 +595,15 @@ new_device(int vid)
vq->port = eth_dev->data->port_id;
}
- for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++)
+ for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
rte_vhost_enable_guest_notification(vid, i, 0);
+ rte_vhost_get_mtu(vid, &eth_dev->data->mtu);
+
eth_dev->data->dev_link.link_status = ETH_LINK_UP;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 1);
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 1);
- }
+ rte_atomic32_set(&internal->dev_attached, 1);
+ update_queuing_status(eth_dev);
RTE_LOG(INFO, PMD, "New connection established\n");
@@ -569,6 +616,7 @@ static void
destroy_device(int vid)
{
struct rte_eth_dev *eth_dev;
+ struct pmd_internal *internal;
struct vhost_queue *vq;
struct internal_list *list;
char ifname[PATH_MAX];
@@ -582,24 +630,10 @@ destroy_device(int vid)
return;
}
eth_dev = list->eth_dev;
+ internal = eth_dev->data->dev_private;
- /* Wait until rx/tx_pkt_burst stops accessing vhost device */
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
- vq = eth_dev->data->rx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 0);
- while (rte_atomic32_read(&vq->while_queuing))
- rte_pause();
- }
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
- vq = eth_dev->data->tx_queues[i];
- if (vq == NULL)
- continue;
- rte_atomic32_set(&vq->allow_queuing, 0);
- while (rte_atomic32_read(&vq->while_queuing))
- rte_pause();
- }
+ rte_atomic32_set(&internal->dev_attached, 0);
+ update_queuing_status(eth_dev);
eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
@@ -661,6 +695,12 @@ vring_state_changed(int vid, uint16_t vring, int enable)
return 0;
}
+static struct vhost_device_ops vhost_ops = {
+ .new_device = new_device,
+ .destroy_device = destroy_device,
+ .vring_state_changed = vring_state_changed,
+};
+
int
rte_eth_vhost_get_queue_event(uint8_t port_id,
struct rte_eth_vhost_queue_event *event)
@@ -727,60 +767,24 @@ rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
return vid;
}
-static void *
-vhost_driver_session(void *param __rte_unused)
-{
- static struct virtio_net_device_ops vhost_ops;
-
- /* set vhost arguments */
- vhost_ops.new_device = new_device;
- vhost_ops.destroy_device = destroy_device;
- vhost_ops.vring_state_changed = vring_state_changed;
- if (rte_vhost_driver_callback_register(&vhost_ops) < 0)
- RTE_LOG(ERR, PMD, "Can't register callbacks\n");
-
- /* start event handling */
- rte_vhost_driver_session_start();
-
- return NULL;
-}
-
static int
-vhost_driver_session_start(void)
+eth_dev_start(struct rte_eth_dev *dev)
{
- int ret;
+ struct pmd_internal *internal = dev->data->dev_private;
- ret = pthread_create(&session_th,
- NULL, vhost_driver_session, NULL);
- if (ret)
- RTE_LOG(ERR, PMD, "Can't create a thread\n");
+ rte_atomic32_set(&internal->started, 1);
+ update_queuing_status(dev);
- return ret;
-}
-
-static void
-vhost_driver_session_stop(void)
-{
- int ret;
-
- ret = pthread_cancel(session_th);
- if (ret)
- RTE_LOG(ERR, PMD, "Can't cancel the thread\n");
-
- ret = pthread_join(session_th, NULL);
- if (ret)
- RTE_LOG(ERR, PMD, "Can't join the thread\n");
-}
-
-static int
-eth_dev_start(struct rte_eth_dev *dev __rte_unused)
-{
return 0;
}
static void
-eth_dev_stop(struct rte_eth_dev *dev __rte_unused)
+eth_dev_stop(struct rte_eth_dev *dev)
{
+ struct pmd_internal *internal = dev->data->dev_private;
+
+ rte_atomic32_set(&internal->started, 0);
+ update_queuing_status(dev);
}
static void
@@ -788,11 +792,14 @@ eth_dev_close(struct rte_eth_dev *dev)
{
struct pmd_internal *internal;
struct internal_list *list;
+ unsigned int i;
internal = dev->data->dev_private;
if (!internal)
return;
+ eth_dev_stop(dev);
+
rte_vhost_driver_unregister(internal->iface_name);
list = find_internal_resource(internal->iface_name);
@@ -804,9 +811,17 @@ eth_dev_close(struct rte_eth_dev *dev)
pthread_mutex_unlock(&internal_list_lock);
rte_free(list);
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ rte_free(dev->data->rx_queues[i]);
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ rte_free(dev->data->tx_queues[i]);
+
+ rte_free(dev->data->mac_addrs);
free(internal->dev_name);
free(internal->iface_name);
rte_free(internal);
+
+ dev->data->dev_private = NULL;
}
static int
@@ -865,7 +880,6 @@ eth_dev_info(struct rte_eth_dev *dev,
return;
}
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)-1;
dev_info->max_rx_queues = internal->max_queues;
@@ -943,35 +957,20 @@ eth_queue_release(void *q)
}
static int
-eth_link_update(struct rte_eth_dev *dev __rte_unused,
- int wait_to_complete __rte_unused)
+eth_tx_done_cleanup(void *txq __rte_unused, uint32_t free_cnt __rte_unused)
{
+ /*
+	 * vhost does not hang on to mbufs: eth_vhost_tx() copies the packet
+	 * data and then frees the mbuf, so there is nothing to clean up.
+ */
return 0;
}
-/**
- * Disable features in feature_mask. Returns 0 on success.
- */
-int
-rte_eth_vhost_feature_disable(uint64_t feature_mask)
-{
- return rte_vhost_feature_disable(feature_mask);
-}
-
-/**
- * Enable features in feature_mask. Returns 0 on success.
- */
-int
-rte_eth_vhost_feature_enable(uint64_t feature_mask)
-{
- return rte_vhost_feature_enable(feature_mask);
-}
-
-/* Returns currently supported vhost features */
-uint64_t
-rte_eth_vhost_feature_get(void)
+static int
+eth_link_update(struct rte_eth_dev *dev __rte_unused,
+ int wait_to_complete __rte_unused)
{
- return rte_vhost_feature_get();
+ return 0;
}
static const struct eth_dev_ops ops = {
@@ -984,6 +983,7 @@ static const struct eth_dev_ops ops = {
.tx_queue_setup = eth_tx_queue_setup,
.rx_queue_release = eth_queue_release,
.tx_queue_release = eth_queue_release,
+ .tx_done_cleanup = eth_tx_done_cleanup,
.link_update = eth_link_update,
.stats_get = eth_stats_get,
.stats_reset = eth_stats_reset,
@@ -992,10 +992,13 @@ static const struct eth_dev_ops ops = {
.xstats_get_names = vhost_dev_xstats_get_names,
};
+static struct rte_vdev_driver pmd_vhost_drv;
+
static int
-eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
- const unsigned numa_node, uint64_t flags)
+eth_dev_vhost_create(struct rte_vdev_device *dev, char *iface_name,
+ int16_t queues, const unsigned int numa_node, uint64_t flags)
{
+ const char *name = rte_vdev_device_name(dev);
struct rte_eth_dev_data *data = NULL;
struct pmd_internal *internal = NULL;
struct rte_eth_dev *eth_dev = NULL;
@@ -1006,23 +1009,19 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
RTE_LOG(INFO, PMD, "Creating VHOST-USER backend on numa socket %u\n",
numa_node);
- /* now do all data allocation - for eth_dev structure, dummy pci driver
- * and internal (private) data
+ /* now do all data allocation - for eth_dev structure and internal
+ * (private) data
*/
data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
goto error;
- internal = rte_zmalloc_socket(name, sizeof(*internal), 0, numa_node);
- if (internal == NULL)
- goto error;
-
list = rte_zmalloc_socket(name, sizeof(*list), 0, numa_node);
if (list == NULL)
goto error;
/* reserve an ethdev entry */
- eth_dev = rte_eth_dev_allocate(name);
+ eth_dev = rte_eth_vdev_allocate(dev, sizeof(*internal));
if (eth_dev == NULL)
goto error;
@@ -1037,14 +1036,12 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
if (vring_state == NULL)
goto error;
- TAILQ_INIT(&eth_dev->link_intr_cbs);
-
/* now put it all together
* - store queue data in internal,
- * - store numa_node info in ethdev data
* - point eth_dev_data to internals
* - and point eth_dev structure to new eth_dev_data structure
*/
+ internal = eth_dev->data->dev_private;
internal->dev_name = strdup(name);
if (internal->dev_name == NULL)
goto error;
@@ -1060,26 +1057,21 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
rte_spinlock_init(&vring_state->lock);
vring_states[eth_dev->data->port_id] = vring_state;
- data->dev_private = internal;
- data->port_id = eth_dev->data->port_id;
- memmove(data->name, eth_dev->data->name, sizeof(data->name));
+	/* We'll replace the 'data' originally allocated by eth_dev, so the
+	 * vhost PMD resources won't be shared across multiple processes.
+ */
+ rte_memcpy(data, eth_dev->data, sizeof(*data));
+ eth_dev->data = data;
+
data->nb_rx_queues = queues;
data->nb_tx_queues = queues;
internal->max_queues = queues;
data->dev_link = pmd_link;
data->mac_addrs = eth_addr;
-
- /* We'll replace the 'data' originally allocated by eth_dev. So the
- * vhost PMD resources won't be shared between multi processes.
- */
- eth_dev->data = data;
- eth_dev->dev_ops = &ops;
- eth_dev->driver = NULL;
data->dev_flags =
RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
- data->kdrv = RTE_KDRV_NONE;
- data->drv_name = internal->dev_name;
- data->numa_node = numa_node;
+
+ eth_dev->dev_ops = &ops;
/* finally assign rx and tx ops */
eth_dev->rx_pkt_burst = eth_vhost_rx;
@@ -1088,17 +1080,24 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues,
if (rte_vhost_driver_register(iface_name, flags))
goto error;
- /* We need only one message handling thread */
- if (rte_atomic16_add_return(&nb_started_ports, 1) == 1) {
- if (vhost_driver_session_start())
- goto error;
+ if (rte_vhost_driver_callback_register(iface_name, &vhost_ops) < 0) {
+ RTE_LOG(ERR, PMD, "Can't register callbacks\n");
+ goto error;
+ }
+
+ if (rte_vhost_driver_start(iface_name) < 0) {
+ RTE_LOG(ERR, PMD, "Failed to start driver for %s\n",
+ iface_name);
+ goto error;
}
return data->port_id;
error:
- if (internal)
+ if (internal) {
+ free(internal->iface_name);
free(internal->dev_name);
+ }
rte_free(vring_state);
rte_free(eth_addr);
if (eth_dev)
@@ -1139,7 +1138,7 @@ open_int(const char *key __rte_unused, const char *value, void *extra_args)
}
static int
-rte_pmd_vhost_probe(const char *name, const char *params)
+rte_pmd_vhost_probe(struct rte_vdev_device *dev)
{
struct rte_kvargs *kvlist = NULL;
int ret = 0;
@@ -1149,9 +1148,10 @@ rte_pmd_vhost_probe(const char *name, const char *params)
int client_mode = 0;
int dequeue_zero_copy = 0;
- RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name);
+ RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
+ rte_vdev_device_name(dev));
- kvlist = rte_kvargs_parse(params, valid_arguments);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
if (kvlist == NULL)
return -1;
@@ -1194,7 +1194,11 @@ rte_pmd_vhost_probe(const char *name, const char *params)
flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
}
- eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags);
+ if (dev->device.numa_node == SOCKET_ID_ANY)
+ dev->device.numa_node = rte_socket_id();
+
+ eth_dev_vhost_create(dev, iface_name, queues, dev->device.numa_node,
+ flags);
out_free:
rte_kvargs_free(kvlist);
@@ -1202,11 +1206,12 @@ out_free:
}
static int
-rte_pmd_vhost_remove(const char *name)
+rte_pmd_vhost_remove(struct rte_vdev_device *dev)
{
+ const char *name;
struct rte_eth_dev *eth_dev = NULL;
- unsigned int i;
+ name = rte_vdev_device_name(dev);
RTE_LOG(INFO, PMD, "Un-Initializing pmd_vhost for %s\n", name);
/* find an ethdev entry */
@@ -1214,22 +1219,11 @@ rte_pmd_vhost_remove(const char *name)
if (eth_dev == NULL)
return -ENODEV;
- eth_dev_stop(eth_dev);
-
eth_dev_close(eth_dev);
- if (rte_atomic16_sub_return(&nb_started_ports, 1) == 0)
- vhost_driver_session_stop();
-
rte_free(vring_states[eth_dev->data->port_id]);
vring_states[eth_dev->data->port_id] = NULL;
- for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
- rte_free(eth_dev->data->rx_queues[i]);
- for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
- rte_free(eth_dev->data->tx_queues[i]);
-
- rte_free(eth_dev->data->mac_addrs);
rte_free(eth_dev->data);
rte_eth_dev_release_port(eth_dev);
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index 7c98b1ae..39ca7719 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -41,37 +41,7 @@ extern "C" {
#include <stdint.h>
#include <stdbool.h>
-#include <rte_virtio_net.h>
-
-/**
- * Disable features in feature_mask.
- *
- * @param feature_mask
- * Vhost features defined in "linux/virtio_net.h".
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-int rte_eth_vhost_feature_disable(uint64_t feature_mask);
-
-/**
- * Enable features in feature_mask.
- *
- * @param feature_mask
- * Vhost features defined in "linux/virtio_net.h".
- * @return
- * - On success, zero.
- * - On failure, a negative value.
- */
-int rte_eth_vhost_feature_enable(uint64_t feature_mask);
-
-/**
- * Returns currently supported vhost features.
- *
- * @return
- * Vhost features defined in "linux/virtio_net.h".
- */
-uint64_t rte_eth_vhost_feature_get(void);
+#include <rte_vhost.h>
/*
* Event description.
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 3d44083f..695db857 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -1,9 +1,6 @@
DPDK_16.04 {
global:
- rte_eth_vhost_feature_disable;
- rte_eth_vhost_feature_enable;
- rte_eth_vhost_feature_get;
rte_eth_vhost_get_queue_event;
local: *;
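With the global rte_eth_vhost_feature_*() wrappers gone, feature negotiation is controlled per socket path through the rte_vhost API once the socket has been registered. A hedged usage sketch (function names as exposed by rte_vhost.h in this release; error handling trimmed):

#include <rte_vhost.h>
#include <linux/virtio_net.h>

/* Disable mergeable RX buffers on one vhost-user socket and verify that
 * the bit is really gone from the advertised feature set.
 */
static int tune_vhost_socket(const char *path)
{
	uint64_t features;

	if (rte_vhost_driver_disable_features(path,
			1ULL << VIRTIO_NET_F_MRG_RXBUF) < 0)
		return -1;
	if (rte_vhost_driver_get_features(path, &features) < 0)
		return -1;
	return (features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? -1 : 0;
}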
diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile
index 97972a6c..b21b8781 100644
--- a/drivers/net/virtio/Makefile
+++ b/drivers/net/virtio/Makefile
@@ -60,14 +60,10 @@ endif
ifeq ($(CONFIG_RTE_VIRTIO_USER),y)
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_user.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_kernel.c
+SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_kernel_tap.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/virtio_user_dev.c
SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user_ethdev.c
endif
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_kvargs
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index f5961ab7..983b95f1 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -38,6 +38,7 @@
#include <unistd.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
@@ -86,7 +87,7 @@ static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void virtio_mac_addr_add(struct rte_eth_dev *dev,
+static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
@@ -485,11 +486,11 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
hw->cvq = cvq;
}
- /* For virtio_user case (that is when dev->pci_dev is NULL), we use
+	/* For virtio_user case (that is when hw->virtio_user_dev is set), we use
* virtual address. And we need properly set _offset_, please see
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
- if (dev->pci_dev)
+ if (!hw->virtio_user_dev)
vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
@@ -545,6 +546,9 @@ virtio_free_queues(struct virtio_hw *hw)
int queue_type;
uint16_t i;
+ if (hw->vqs == NULL)
+ return;
+
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
if (!vq)
@@ -563,9 +567,11 @@ virtio_free_queues(struct virtio_hw *hw)
}
rte_free(vq);
+ hw->vqs[i] = NULL;
}
rte_free(hw->vqs);
+ hw->vqs = NULL;
}
static int
@@ -593,16 +599,29 @@ virtio_alloc_queues(struct rte_eth_dev *dev)
return 0;
}
+static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
+
static void
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
+ VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
+ if (intr_conf->rxq)
+ virtio_queues_unbind_intr(dev);
+
+ if (intr_conf->lsc || intr_conf->rxq) {
+ rte_intr_disable(dev->intr_handle);
+ rte_intr_efd_disable(dev->intr_handle);
+ rte_free(dev->intr_handle->intr_vec);
+ dev->intr_handle->intr_vec = NULL;
+ }
+
vtpci_reset(hw);
virtio_dev_free_mbufs(dev);
virtio_free_queues(hw);
@@ -617,7 +636,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -640,7 +659,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -663,7 +682,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -686,7 +705,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
int ret;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
- PMD_INIT_LOG(INFO, "host does not support rx control\n");
+ PMD_INIT_LOG(INFO, "host does not support rx control");
return;
}
@@ -708,15 +727,38 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
hw->vtnet_hdr_size;
uint32_t frame_size = mtu + ether_hdr_len;
+ uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
+
+ max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
- if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
- PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
- ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
+ if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+ PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
+ ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
return -EINVAL;
}
return 0;
}
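virtio_mtu_set() now bounds the frame size by both the device-reported maximum MTU and VIRTIO_MAX_RX_PKTLEN. The same arithmetic as a stand-alone worked example (header sizes are the usual Ethernet/VLAN/virtio-net values; max_mtu is a placeholder for hw->max_mtu):

#include <stdint.h>
#include <stdio.h>

#define HDR_LEN              (14 + 4 + 12) /* Ethernet + VLAN + virtio-net hdr */
#define VIRTIO_MAX_RX_PKTLEN 9728U
#define ETHER_MIN_MTU        68

static int mtu_ok(uint16_t mtu, uint32_t max_mtu)
{
	uint32_t frame = mtu + HDR_LEN;
	uint32_t max_frame = max_mtu + HDR_LEN;

	if (max_frame > VIRTIO_MAX_RX_PKTLEN)
		max_frame = VIRTIO_MAX_RX_PKTLEN;
	return mtu >= ETHER_MIN_MTU && frame <= max_frame;
}

int main(void)
{
	/* 1500-byte MTU against a 9000-byte device limit: 1530 <= 9030 -> ok */
	printf("%d\n", mtu_ok(1500, 9000));
	return 0;
}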
+static int
+virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_enable_intr(vq);
+ return 0;
+}
+
+static int
+virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
+ struct virtqueue *vq = rxvq->vq;
+
+ virtqueue_disable_intr(vq);
+ return 0;
+}
+
/*
* dev_ops for virtio, bare necessities for basic operation
*/
@@ -738,7 +780,10 @@ static const struct eth_dev_ops virtio_eth_dev_ops = {
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
.rx_queue_setup = virtio_dev_rx_queue_setup,
+ .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
+ .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
.rx_queue_release = virtio_dev_queue_release,
+ .rx_descriptor_done = virtio_dev_rx_queue_done,
.tx_queue_setup = virtio_dev_tx_queue_setup,
.tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
@@ -980,7 +1025,7 @@ virtio_get_hwaddr(struct virtio_hw *hw)
}
}
-static void
+static int
virtio_mac_table_set(struct virtio_hw *hw,
const struct virtio_net_ctrl_mac *uc,
const struct virtio_net_ctrl_mac *mc)
@@ -990,7 +1035,7 @@ virtio_mac_table_set(struct virtio_hw *hw,
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
PMD_DRV_LOG(INFO, "host does not support mac table");
- return;
+ return -1;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
@@ -1005,9 +1050,10 @@ virtio_mac_table_set(struct virtio_hw *hw,
err = virtio_send_command(hw->cvq, &ctrl, len, 2);
if (err != 0)
PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
+ return err;
}
-static void
+static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused)
{
@@ -1018,7 +1064,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
if (index >= VIRTIO_MAX_MAC_ADDRS) {
PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
- return;
+ return -EINVAL;
}
uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
@@ -1035,7 +1081,7 @@ virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
}
- virtio_mac_table_set(hw, uc, mc);
+ return virtio_mac_table_set(hw, uc, mc);
}
static void
@@ -1122,6 +1168,18 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
host_features);
+ /* If supported, ensure MTU value is valid before acknowledging it. */
+ if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
+ struct virtio_net_config config;
+
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config.mtu, sizeof(config.mtu));
+
+ if (config.mtu < ETHER_MIN_MTU)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ }
+
/*
* Negotiate features: Subset of device feature bits are written back
* guest feature bits.
@@ -1154,9 +1212,8 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
* Process Virtio Config changed interrupt and call the callback
* if link state changed.
*/
-static void
-virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
- void *param)
+void
+virtio_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = param;
struct virtio_hw *hw = dev->data->dev_private;
@@ -1166,7 +1223,7 @@ virtio_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
isr = vtpci_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
- if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0)
+ if (rte_intr_enable(dev->intr_handle) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (isr & VIRTIO_PCI_ISR_CONFIG) {
@@ -1187,6 +1244,95 @@ rx_func_get(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
+/* Only support 1:1 queue/interrupt mapping so far.
+ * TODO: support n:1 queue/interrupt mapping when the number of available
+ * interrupt vectors is limited (< N + 1).
+ */
+static int
+virtio_queues_bind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt binding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+ dev->intr_handle->intr_vec[i] = i + 1;
+ if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set queue vector");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void
+virtio_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i)
+ VTPCI_OPS(hw)->set_queue_irq(hw,
+ hw->vqs[i * VTNET_CQ],
+ VIRTIO_MSI_NO_VECTOR);
+}
+
+static int
+virtio_configure_intr(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (!rte_intr_cap_multiple(dev->intr_handle)) {
+ PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+ return -ENOTSUP;
+ }
+
+ if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
+ PMD_INIT_LOG(ERR, "Fail to create eventfd");
+ return -1;
+ }
+
+ if (!dev->intr_handle->intr_vec) {
+ dev->intr_handle->intr_vec =
+ rte_zmalloc("intr_vec",
+ hw->max_queue_pairs * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+ hw->max_queue_pairs);
+ return -ENOMEM;
+ }
+ }
+
+ /* Re-register callback to update max_intr */
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+
+	/* Do not remove this call: it enables MSI-X, without which QEMU
+	 * crashes with SIGSEGV when DRIVER_OK is sent. For legacy devices it
+	 * must also run before queue/vector binding so the config size grows
+	 * from 20 to 24 bytes; otherwise writes to VIRTIO_MSI_QUEUE_VECTOR
+	 * (offset 22) are ignored.
+ */
+ if (rte_intr_enable(dev->intr_handle) < 0) {
+ PMD_DRV_LOG(ERR, "interrupt enable failed");
+ return -1;
+ }
+
+ if (virtio_queues_bind_intr(dev) < 0) {
+ PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt");
+ return -1;
+ }
+
+ return 0;
+}
+
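The new rxq interrupt plumbing is driven from the application by requesting intr_conf.rxq at configure time and arming the per-queue vector when a poll round comes up empty. A hedged sketch using standard ethdev calls (the epoll/eventfd wait in the middle is omitted; port and queue numbers are placeholders):

#include <rte_ethdev.h>

/* Request rxq interrupts; this is what makes virtio_configure_intr() run
 * during device initialization.
 */
static int configure_with_rxq_intr(uint8_t port_id, struct rte_eth_conf *conf)
{
	conf->intr_conf.rxq = 1;
	return rte_eth_dev_configure(port_id, 1, 1, conf);
}

/* Arm the vector for queue 0 before sleeping, disarm after wakeup. */
static void idle_wait(uint8_t port_id)
{
	rte_eth_dev_rx_intr_enable(port_id, 0);
	/* ... block on the queue's event fd, e.g. via rte_epoll_wait() ... */
	rte_eth_dev_rx_intr_disable(port_id, 0);
}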
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
@@ -1194,7 +1340,7 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
struct virtio_hw *hw = eth_dev->data->dev_private;
struct virtio_net_config *config;
struct virtio_net_config local_config;
- struct rte_pci_device *pci_dev = eth_dev->pci_dev;
+ struct rte_pci_device *pci_dev = NULL;
int ret;
/* Reset the device although not necessary at startup */
@@ -1208,13 +1354,17 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
- rte_eth_copy_pci_info(eth_dev, pci_dev);
+ if (!hw->virtio_user_dev) {
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+ }
- /* If host does not support status then disable LSC */
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- else
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
+ /* If host does not support both status and MSI-X then disable LSC */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rx_func_get(eth_dev);
@@ -1264,6 +1414,32 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->max_queue_pairs = config->max_virtqueue_pairs;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, mtu),
+ &config->mtu,
+ sizeof(config->mtu));
+
+ /*
+ * MTU value has already been checked at negotiation
+ * time, but check again in case it has changed since
+ * then, which should not happen.
+ */
+ if (config->mtu < ETHER_MIN_MTU) {
+ PMD_INIT_LOG(ERR, "invalid max MTU value (%u)",
+ config->mtu);
+ return -1;
+ }
+
+ hw->max_mtu = config->mtu;
+ /* Set initial MTU to maximum one supported by vhost */
+ eth_dev->data->mtu = config->mtu;
+
+ } else {
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
+ }
+
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d",
config->max_virtqueue_pairs);
PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
@@ -1280,6 +1456,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
ret = virtio_alloc_queues(eth_dev);
if (ret < 0)
return ret;
+
+ if (eth_dev->data->dev_conf.intr_conf.rxq) {
+ if (virtio_configure_intr(eth_dev) < 0) {
+ PMD_INIT_LOG(ERR, "failed to configure interrupt");
+ return -1;
+ }
+ }
+
vtpci_reinit_complete(hw);
if (pci_dev)
@@ -1301,7 +1485,7 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
if (hw->modern) {
/*
* We don't have to re-parse the PCI config space, since
- * rte_eal_pci_map_device() makes sure the mapped address
+ * rte_pci_map_device() makes sure the mapped address
* in secondary process would equal to the one mapped in
* the primary process: error will be returned if that
* requirement is not met.
@@ -1310,12 +1494,12 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
* (such as dev_cfg, common_cfg, etc.) parsed from the
* primary process, which is stored in shared memory.
*/
- if (rte_eal_pci_map_device(pci_dev)) {
+ if (rte_pci_map_device(pci_dev)) {
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
return -1;
}
} else {
- if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+ if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
return -1;
}
@@ -1344,8 +1528,6 @@ int
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- struct rte_pci_device *pci_dev;
- uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
int ret;
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
@@ -1355,7 +1537,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(eth_dev->pci_dev, hw);
+ ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
+ hw);
if (ret)
return ret;
}
@@ -1379,17 +1562,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
return -ENOMEM;
}
- pci_dev = eth_dev->pci_dev;
hw->port_id = eth_dev->data->port_id;
-
- if (pci_dev) {
- ret = vtpci_init(pci_dev, hw, &dev_flags);
+ /* For virtio_user case the hw->virtio_user_dev is populated by
+ * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
+ */
+ if (!hw->virtio_user_dev) {
+ ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
if (ret)
return ret;
}
- eth_dev->data->dev_flags = dev_flags;
-
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
@@ -1397,7 +1579,7 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
/* Setup interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_register(&pci_dev->intr_handle,
+ rte_intr_callback_register(eth_dev->intr_handle,
virtio_interrupt_handler, eth_dev);
return 0;
@@ -1406,8 +1588,6 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct rte_pci_device *pci_dev;
-
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
@@ -1415,7 +1595,6 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
- pci_dev = eth_dev->pci_dev;
eth_dev->dev_ops = NULL;
eth_dev->tx_pkt_burst = NULL;
@@ -1426,29 +1605,37 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
/* reset interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_unregister(&pci_dev->intr_handle,
+ rte_intr_callback_unregister(eth_dev->intr_handle,
virtio_interrupt_handler,
eth_dev);
- rte_eal_pci_unmap_device(pci_dev);
+ if (eth_dev->device)
+ rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
return 0;
}
-static struct eth_driver rte_virtio_pmd = {
- .pci_drv = {
- .driver = {
- .name = "net_virtio",
- },
- .id_table = pci_id_virtio_map,
- .drv_flags = RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
+static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
+ eth_virtio_dev_init);
+}
+
+static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+}
+
+static struct rte_pci_driver rte_virtio_pmd = {
+ .driver = {
+ .name = "net_virtio",
},
- .eth_dev_init = eth_virtio_dev_init,
- .eth_dev_uninit = eth_virtio_dev_uninit,
- .dev_private_size = sizeof(struct virtio_hw),
+ .id_table = pci_id_virtio_map,
+ .drv_flags = 0,
+ .probe = eth_virtio_pci_probe,
+ .remove = eth_virtio_pci_remove,
};
RTE_INIT(rte_virtio_pmd_init);
@@ -1460,7 +1647,7 @@ rte_virtio_pmd_init(void)
return;
}
- rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
+ rte_pci_register(&rte_virtio_pmd);
}
/*
@@ -1520,7 +1707,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
}
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
+		/* Enable vector (0) for Link State Interrupt */
+ if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
+ VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
return -EBUSY;
}
@@ -1543,16 +1732,22 @@ virtio_dev_start(struct rte_eth_dev *dev)
PMD_DRV_LOG(ERR, "link status not supported by host");
return -ENOTSUP;
}
+ }
- if (rte_intr_enable(&dev->pci_dev->intr_handle) < 0) {
+	/* Enable uio/vfio intr/eventfd mapping: although we already did that
+	 * in device configure, it could have been unmapped while the device
+	 * was stopped.
+ */
+ if (dev->data->dev_conf.intr_conf.lsc ||
+ dev->data->dev_conf.intr_conf.rxq) {
+ rte_intr_disable(dev->intr_handle);
+
+ if (rte_intr_enable(dev->intr_handle) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
}
}
- /* Initialize Link state */
- virtio_dev_link_update(dev, 0);
-
/*Notify the backend
*Otherwise the tap backend might already stop its queue due to fullness.
*vhost backend will have no chance to be waked up
@@ -1582,6 +1777,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
VIRTQUEUE_DUMP(txvq->vq);
}
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
return 0;
}
@@ -1636,13 +1836,16 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
+ struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
- if (dev->data->dev_conf.intr_conf.lsc)
- rte_intr_disable(&dev->pci_dev->intr_handle);
+ if (intr_conf->lsc || intr_conf->rxq)
+ rte_intr_disable(dev->intr_handle);
+ hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
}
@@ -1659,7 +1862,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = SPEED_10G;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
@@ -1684,13 +1889,12 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
- uint64_t tso_mask;
+ uint64_t tso_mask, host_features;
struct virtio_hw *hw = dev->data->dev_private;
- if (dev->pci_dev)
- dev_info->driver_name = dev->driver->pci_drv.driver.name;
- else
- dev_info->driver_name = "virtio_user PMD";
+ dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+
+ dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
@@ -1701,18 +1905,25 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->default_txconf = (struct rte_eth_txconf) {
.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
};
- dev_info->rx_offload_capa =
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = 0;
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ dev_info->rx_offload_capa = 0;
+ if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+ dev_info->rx_offload_capa |=
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM;
+ }
+ tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6);
+ if ((host_features & tso_mask) == tso_mask)
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
}
-
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((hw->guest_features & tso_mask) == tso_mask)
@@ -1732,3 +1943,4 @@ __rte_unused uint8_t is_rx)
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 4feccf93..c3413c6d 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -51,7 +51,7 @@
#define VIRTIO_MAX_TX_QUEUES 128U
#define VIRTIO_MAX_MAC_ADDRS 64
#define VIRTIO_MIN_RX_BUFSIZE 64
-#define VIRTIO_MAX_RX_PKTLEN 9728
+#define VIRTIO_MAX_RX_PKTLEN 9728U
/* Features desired/implemented by this driver. */
#define VIRTIO_PMD_DEFAULT_GUEST_FEATURES \
@@ -66,6 +66,7 @@
1u << VIRTIO_NET_F_HOST_TSO4 | \
1u << VIRTIO_NET_F_HOST_TSO6 | \
1u << VIRTIO_NET_F_MRG_RXBUF | \
+ 1u << VIRTIO_NET_F_MTU | \
1u << VIRTIO_RING_F_INDIRECT_DESC | \
1ULL << VIRTIO_F_VERSION_1 | \
1ULL << VIRTIO_F_IOMMU_PLATFORM)
@@ -83,6 +84,9 @@ void virtio_dev_cq_start(struct rte_eth_dev *dev);
/*
* RX/TX function prototypes
*/
+
+int virtio_dev_rx_queue_done(void *rxq, uint16_t offset);
+
int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
@@ -109,4 +113,6 @@ uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
int eth_virtio_dev_init(struct rte_eth_dev *eth_dev);
+void virtio_interrupt_handler(void *param);
+
#endif /* _VIRTIO_ETHDEV_H_ */
diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 8d5355c7..b7b3d615 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -37,6 +37,8 @@
#include <fcntl.h>
#endif
+#include <rte_io.h>
+
#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
@@ -48,6 +50,7 @@
*/
#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
/*
* The remaining space is defined by each driver as the per-driver
@@ -92,17 +95,17 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
while (length > 0) {
if (length >= 4) {
size = 4;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
} else if (length >= 2) {
size = 2;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
} else {
size = 1;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
VIRTIO_PCI_CONFIG(hw) + offset);
}
@@ -111,8 +114,8 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
- rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
- VIRTIO_PCI_CONFIG(hw) + offset);
+ rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
+ VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
@@ -131,16 +134,16 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
if (length >= 4) {
size = 4;
tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
+ rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
VIRTIO_PCI_CONFIG(hw) + offset);
} else if (length >= 2) {
size = 2;
tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
+ rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
VIRTIO_PCI_CONFIG(hw) + offset);
} else {
size = 1;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
+ rte_pci_ioport_write(VTPCI_IO(hw), src, size,
VIRTIO_PCI_CONFIG(hw) + offset);
}
@@ -149,8 +152,8 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
length -= size;
}
#else
- rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
- VIRTIO_PCI_CONFIG(hw) + offset);
+ rte_pci_ioport_write(VTPCI_IO(hw), src, length,
+ VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
@@ -159,8 +162,7 @@ legacy_get_features(struct virtio_hw *hw)
{
uint32_t dst;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
- VIRTIO_PCI_HOST_FEATURES);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
return dst;
}
@@ -172,8 +174,8 @@ legacy_set_features(struct virtio_hw *hw, uint64_t features)
"only 32 bit features are allowed for legacy virtio!");
return;
}
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
- VIRTIO_PCI_GUEST_FEATURES);
+ rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
+ VIRTIO_PCI_GUEST_FEATURES);
}
static uint8_t
@@ -181,14 +183,14 @@ legacy_get_status(struct virtio_hw *hw)
{
uint8_t dst;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
return dst;
}
static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
+ rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}
static void
@@ -202,7 +204,7 @@ legacy_get_isr(struct virtio_hw *hw)
{
uint8_t dst;
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
return dst;
}
@@ -212,10 +214,20 @@ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
uint16_t dst;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
- VIRTIO_MSI_CONFIG_VECTOR);
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
- VIRTIO_MSI_CONFIG_VECTOR);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
+ return dst;
+}
+
+static uint16_t
+legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+ uint16_t dst;
+
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
return dst;
}
@@ -224,9 +236,8 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
uint16_t dst;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
- VIRTIO_PCI_QUEUE_SEL);
- rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+ rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
return dst;
}
@@ -238,10 +249,10 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
if (!check_vq_phys_addr_ok(vq))
return -1;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
- VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+ rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
return 0;
}
@@ -251,57 +262,16 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
uint32_t src = 0;
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
- VIRTIO_PCI_QUEUE_SEL);
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_SEL);
+ rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
- VIRTIO_PCI_QUEUE_NOTIFY);
-}
-
-#ifdef RTE_EXEC_ENV_LINUXAPP
-static int
-legacy_virtio_has_msix(const struct rte_pci_addr *loc)
-{
- DIR *d;
- char dirname[PATH_MAX];
-
- snprintf(dirname, sizeof(dirname),
- "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
- loc->domain, loc->bus, loc->devid, loc->function);
-
- d = opendir(dirname);
- if (d)
- closedir(d);
-
- return d != NULL;
-}
-#else
-static int
-legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
-{
- /* nic_uio does not enable interrupts, return 0 (false). */
- return 0;
-}
-#endif
-
-static int
-legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
- struct virtio_hw *hw, uint32_t *dev_flags)
-{
- if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
- return -1;
-
- if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
- *dev_flags |= RTE_ETH_DEV_INTR_LSC;
- else
- *dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
-
- return 0;
+ rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+ VIRTIO_PCI_QUEUE_NOTIFY);
}
const struct virtio_pci_ops legacy_ops = {
@@ -314,54 +284,18 @@ const struct virtio_pci_ops legacy_ops = {
.set_features = legacy_set_features,
.get_isr = legacy_get_isr,
.set_config_irq = legacy_set_config_irq,
+ .set_queue_irq = legacy_set_queue_irq,
.get_queue_num = legacy_get_queue_num,
.setup_queue = legacy_setup_queue,
.del_queue = legacy_del_queue,
.notify_queue = legacy_notify_queue,
};
-
-static inline uint8_t
-io_read8(uint8_t *addr)
-{
- return *(volatile uint8_t *)addr;
-}
-
-static inline void
-io_write8(uint8_t val, uint8_t *addr)
-{
- *(volatile uint8_t *)addr = val;
-}
-
-static inline uint16_t
-io_read16(uint16_t *addr)
-{
- return *(volatile uint16_t *)addr;
-}
-
-static inline void
-io_write16(uint16_t val, uint16_t *addr)
-{
- *(volatile uint16_t *)addr = val;
-}
-
-static inline uint32_t
-io_read32(uint32_t *addr)
-{
- return *(volatile uint32_t *)addr;
-}
-
-static inline void
-io_write32(uint32_t val, uint32_t *addr)
-{
- *(volatile uint32_t *)addr = val;
-}
-
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
- io_write32(val & ((1ULL << 32) - 1), lo);
- io_write32(val >> 32, hi);
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
}
static void
@@ -373,13 +307,13 @@ modern_read_dev_config(struct virtio_hw *hw, size_t offset,
uint8_t old_gen, new_gen;
do {
- old_gen = io_read8(&hw->common_cfg->config_generation);
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
p = dst;
for (i = 0; i < length; i++)
- *p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i);
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
- new_gen = io_read8(&hw->common_cfg->config_generation);
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
} while (old_gen != new_gen);
}
@@ -391,7 +325,7 @@ modern_write_dev_config(struct virtio_hw *hw, size_t offset,
const uint8_t *p = src;
for (i = 0; i < length; i++)
- io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i);
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}
static uint64_t
@@ -399,11 +333,11 @@ modern_get_features(struct virtio_hw *hw)
{
uint32_t features_lo, features_hi;
- io_write32(0, &hw->common_cfg->device_feature_select);
- features_lo = io_read32(&hw->common_cfg->device_feature);
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
- io_write32(1, &hw->common_cfg->device_feature_select);
- features_hi = io_read32(&hw->common_cfg->device_feature);
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
return ((uint64_t)features_hi << 32) | features_lo;
}
@@ -411,25 +345,25 @@ modern_get_features(struct virtio_hw *hw)
static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
- io_write32(0, &hw->common_cfg->guest_feature_select);
- io_write32(features & ((1ULL << 32) - 1),
- &hw->common_cfg->guest_feature);
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
- io_write32(1, &hw->common_cfg->guest_feature_select);
- io_write32(features >> 32,
- &hw->common_cfg->guest_feature);
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
}
static uint8_t
modern_get_status(struct virtio_hw *hw)
{
- return io_read8(&hw->common_cfg->device_status);
+ return rte_read8(&hw->common_cfg->device_status);
}
static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
- io_write8(status, &hw->common_cfg->device_status);
+ rte_write8(status, &hw->common_cfg->device_status);
}
static void
@@ -442,21 +376,29 @@ modern_reset(struct virtio_hw *hw)
static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
- return io_read8(hw->isr);
+ return rte_read8(hw->isr);
}
static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
- io_write16(vec, &hw->common_cfg->msix_config);
- return io_read16(&hw->common_cfg->msix_config);
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
}
static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
- io_write16(queue_id, &hw->common_cfg->queue_select);
- return io_read16(&hw->common_cfg->queue_size);
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
}
static int
@@ -474,7 +416,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
ring[vq->vq_nentries]),
VIRTIO_PCI_VRING_ALIGN);
- io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
&hw->common_cfg->queue_desc_hi);
@@ -483,11 +425,11 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
&hw->common_cfg->queue_used_hi);
- notify_off = io_read16(&hw->common_cfg->queue_notify_off);
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
notify_off * hw->notify_off_multiplier);
- io_write16(1, &hw->common_cfg->queue_enable);
+ rte_write16(1, &hw->common_cfg->queue_enable);
PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
@@ -502,7 +444,7 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
- io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
&hw->common_cfg->queue_desc_hi);
@@ -511,13 +453,13 @@ modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
&hw->common_cfg->queue_used_hi);
- io_write16(0, &hw->common_cfg->queue_enable);
+ rte_write16(0, &hw->common_cfg->queue_enable);
}
static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
- io_write16(1, vq->notify_addr);
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
}
const struct virtio_pci_ops modern_ops = {
@@ -530,6 +472,7 @@ const struct virtio_pci_ops modern_ops = {
.set_features = modern_set_features,
.get_isr = modern_get_isr,
.set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
.get_queue_num = modern_get_queue_num,
.setup_queue = modern_setup_queue,
.del_queue = modern_del_queue,
@@ -601,14 +544,6 @@ vtpci_isr(struct virtio_hw *hw)
return VTPCI_OPS(hw)->get_isr(hw);
}
-
-/* Enable one vector (0) for Link State Interrupt */
-uint16_t
-vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
-{
- return VTPCI_OPS(hw)->set_config_irq(hw, vec);
-}
-
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
@@ -651,25 +586,28 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
struct virtio_pci_cap cap;
int ret;
- if (rte_eal_pci_map_device(dev)) {
+ if (rte_pci_map_device(dev)) {
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
return -1;
}
- ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
if (ret < 0) {
PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
return -1;
}
while (pos) {
- ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
if (ret < 0) {
PMD_INIT_LOG(ERR,
"failed to read pci cap at pos: %x", pos);
break;
}
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX)
+ hw->use_msix = 1;
+
if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
PMD_INIT_LOG(DEBUG,
"[%2x] skipping non VNDR cap id: %02x",
@@ -686,8 +624,8 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
hw->common_cfg = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_NOTIFY_CFG:
- rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
- 4, pos + sizeof(cap));
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
hw->notify_base = get_cfg_addr(dev, &cap);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
@@ -728,11 +666,8 @@ next:
* Return 0 on success.
*/
int
-vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
- uint32_t *dev_flags)
+vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
- hw->dev = dev;
-
/*
* Try if we can succeed reading virtio pci caps, which exists
* only on modern pci device. If failed, we fallback to legacy
@@ -742,12 +677,11 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
PMD_INIT_LOG(INFO, "modern virtio pci detected.");
virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
hw->modern = 1;
- *dev_flags |= RTE_ETH_DEV_INTR_LSC;
return 0;
}
PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
- if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
+ if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
if (dev->kdrv == RTE_KDRV_UNKNOWN &&
(!dev->device.devargs ||
dev->device.devargs->type !=
@@ -760,7 +694,6 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
}
virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
- hw->use_msix = legacy_virtio_has_msix(&dev->addr);
hw->modern = 0;
return 0;
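
io_write64_twopart() survives the cleanup above because the modern common config exposes 64-bit queue addresses as pairs of 32-bit registers (queue_desc_lo/hi and friends), so a 64-bit value has to be split into low and high halves. A standalone sketch of that split, with plain volatile stores standing in for rte_write32().

#include <stdint.h>
#include <stdio.h>

/* Stand-in for rte_write32(); real MMIO would target device registers. */
static void
write32(uint32_t val, uint32_t *reg)
{
	*(volatile uint32_t *)reg = val;
}

/* Split a 64-bit value into lo/hi 32-bit register writes. */
static void
write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	write32(val & ((1ULL << 32) - 1), lo);
	write32(val >> 32, hi);
}

int
main(void)
{
	uint32_t lo = 0, hi = 0;

	write64_twopart(0x1122334455667788ULL, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* lo=0x55667788 hi=0x11223344 */
	return 0;
}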
diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h
index 511a1c87..18caebdd 100644
--- a/drivers/net/virtio/virtio_pci.h
+++ b/drivers/net/virtio/virtio_pci.h
@@ -106,6 +106,7 @@ struct virtnet_ctl;
/* The feature bitmap for virtio net */
#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
+#define VIRTIO_NET_F_MTU 3 /* Initial MTU advice. */
#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -160,7 +161,8 @@ struct virtnet_ctl;
/*
* Maximum number of virtqueues per device.
*/
-#define VIRTIO_MAX_VIRTQUEUES 8
+#define VIRTIO_MAX_VIRTQUEUE_PAIRS 8
+#define VIRTIO_MAX_VIRTQUEUES (VIRTIO_MAX_VIRTQUEUE_PAIRS * 2 + 1)
/* Common configuration */
#define VIRTIO_PCI_CAP_COMMON_CFG 1
@@ -235,6 +237,9 @@ struct virtio_pci_ops {
uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec);
+ uint16_t (*set_queue_irq)(struct virtio_hw *hw, struct virtqueue *vq,
+ uint16_t vec);
+
uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id);
int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq);
void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq);
@@ -248,6 +253,8 @@ struct virtio_hw {
uint64_t req_guest_features;
uint64_t guest_features;
uint32_t max_queue_pairs;
+ uint16_t started;
+ uint16_t max_mtu;
uint16_t vtnet_hdr_size;
uint8_t vlan_strip;
uint8_t use_msix;
@@ -258,7 +265,6 @@ struct virtio_hw {
uint32_t notify_off_multiplier;
uint8_t *isr;
uint16_t *notify_base;
- struct rte_pci_device *dev;
struct virtio_pci_common_cfg *common_cfg;
struct virtio_net_config *dev_cfg;
void *virtio_user_dev;
@@ -294,6 +300,7 @@ struct virtio_net_config {
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
uint16_t status;
uint16_t max_virtqueue_pairs;
+ uint16_t mtu;
} __attribute__((packed));
/*
@@ -314,8 +321,7 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
/*
* Function declaration from virtio_pci.c
*/
-int vtpci_init(struct rte_pci_device *, struct virtio_hw *,
- uint32_t *dev_flags);
+int vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw);
void vtpci_reset(struct virtio_hw *);
void vtpci_reinit_complete(struct virtio_hw *);
@@ -331,8 +337,6 @@ void vtpci_read_dev_config(struct virtio_hw *, size_t, void *, int);
uint8_t vtpci_isr(struct virtio_hw *);
-uint16_t vtpci_irq_config(struct virtio_hw *, uint16_t);
-
extern const struct virtio_pci_ops legacy_ops;
extern const struct virtio_pci_ops modern_ops;
extern const struct virtio_pci_ops virtio_user_ops;
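
The new set_queue_irq member extends the per-device ops table so that legacy, modern and virtio_user transports can all bind a virtqueue to an MSI-X vector behind the same interface. A trimmed-down, hypothetical sketch of calling through such an ops table; none of the names below are the driver's own.

#include <stdint.h>
#include <stdio.h>

struct fake_hw { uint16_t last_vec; };
struct fake_vq { uint16_t index; };

struct fake_ops {
	uint16_t (*set_queue_irq)(struct fake_hw *hw, struct fake_vq *vq,
				  uint16_t vec);
};

static uint16_t
backend_set_queue_irq(struct fake_hw *hw, struct fake_vq *vq, uint16_t vec)
{
	/* A real backend would select the queue, program the vector and
	 * read the register back to confirm the assignment. */
	hw->last_vec = vec;
	printf("queue %u -> vector %u\n", vq->index, vec);
	return vec;
}

static const struct fake_ops ops = { .set_queue_irq = backend_set_queue_irq };

int
main(void)
{
	struct fake_hw hw = { 0 };
	struct fake_vq rxq = { .index = 0 };

	/* Caller does not care which backend implements the op. */
	if (ops.set_queue_irq(&hw, &rxq, 1) != 1)
		printf("vector rejected by device\n");
	return 0;
}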
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index a33ef1a8..fbc96dfb 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -72,6 +72,15 @@
#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
ETH_TXQ_FLAGS_NOOFFLOADS)
+int
+virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
+{
+ struct virtnet_rx *rxvq = rxq;
+ struct virtqueue *vq = rxvq->vq;
+
+ return VIRTQUEUE_NUSED(vq) >= offset;
+}
+
static void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
@@ -124,7 +133,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
if (unlikely(cookie == NULL)) {
- PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+ PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
vq->vq_used_cons_idx);
break;
}
@@ -716,7 +725,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
- struct virtio_hw *hw;
+ struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -727,6 +736,10 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
int offload;
struct virtio_net_hdr *hdr;
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
@@ -739,8 +752,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
- hw = vq->hw;
- nb_rx = 0;
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
offload = rx_offload_enabled(hw);
@@ -763,8 +774,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
- rxm->nb_segs = 1;
- rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
@@ -784,7 +793,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rx_pkts[nb_rx++] = rxm;
- rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
+ rxvq->stats.bytes += rxm->pkt_len;
virtio_update_packet_stats(&rxvq->stats, rxm);
}
@@ -827,7 +836,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
- struct virtio_hw *hw;
+ struct virtio_hw *hw = vq->hw;
struct rte_mbuf *rxm, *new_mbuf;
uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -841,14 +850,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
uint32_t hdr_size;
int offload;
+ nb_rx = 0;
+ if (unlikely(hw->started == 0))
+ return nb_rx;
+
nb_used = VIRTQUEUE_NUSED(vq);
virtio_rmb();
PMD_RX_LOG(DEBUG, "used:%d", nb_used);
- hw = vq->hw;
- nb_rx = 0;
i = 0;
nb_enqueued = 0;
seg_num = 0;
@@ -891,7 +902,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->nb_segs = seg_num;
- rxm->next = NULL;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
@@ -936,7 +946,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
rxm = rcv_pkts[extra_idx];
rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
- rxm->next = NULL;
rxm->pkt_len = (uint32_t)(len[extra_idx]);
rxm->data_len = (uint16_t)(len[extra_idx]);
@@ -1000,9 +1009,12 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
struct virtqueue *vq = txvq->vq;
struct virtio_hw *hw = vq->hw;
uint16_t hdr_size = hw->vtnet_hdr_size;
- uint16_t nb_used, nb_tx;
+ uint16_t nb_used, nb_tx = 0;
int error;
+ if (unlikely(hw->started == 0))
+ return nb_tx;
+
if (unlikely(nb_pkts < 1))
return nb_pkts;
@@ -1027,7 +1039,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
}
/* optimize ring usage */
- if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
+ if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
rte_mbuf_refcnt_read(txm) == 1 &&
RTE_MBUF_DIRECT(txm) &&
txm->nb_segs == 1 &&
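
virtio_dev_rx_queue_done() above reports whether at least `offset` used descriptors are pending; VIRTQUEUE_NUSED is, in essence, the unsigned 16-bit difference between the device's used index and the driver's consumed index, so the count stays correct across index wraparound. A minimal sketch of that arithmetic.

#include <stdint.h>
#include <stdio.h>

/* Used-but-not-consumed descriptors; the subtraction is truncated to
 * uint16_t so it remains correct when the indices wrap past 65535. */
static uint16_t
nused(uint16_t used_idx, uint16_t cons_idx)
{
	return (uint16_t)(used_idx - cons_idx);
}

static int
rx_queue_done(uint16_t used_idx, uint16_t cons_idx, uint16_t offset)
{
	return nused(used_idx, cons_idx) >= offset;
}

int
main(void)
{
	/* Wrapped case: used index restarted from 0, consumer near the top. */
	printf("%u descriptors used\n", nused(5, 65530));             /* 11 */
	printf("done(offset=8):  %d\n", rx_queue_done(5, 65530, 8));  /* 1  */
	printf("done(offset=16): %d\n", rx_queue_done(5, 65530, 16)); /* 0  */
	return 0;
}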
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index b651e53b..542cf805 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -89,12 +89,17 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
{
struct virtnet_tx *txvq = tx_queue;
struct virtqueue *vq = txvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_desc *start_dp;
uint16_t nb_tail, nb_commit;
int i;
uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
+ uint16_t nb_tx = 0;
+
+ if (unlikely(hw->started == 0))
+ return nb_tx;
nb_used = VIRTQUEUE_NUSED(vq);
rte_compiler_barrier();
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index b08f8594..f531c542 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -98,13 +98,13 @@ virtio_xmit_cleanup(struct virtqueue *vq)
desc_idx = (uint16_t)(vq->vq_used_cons_idx &
((vq->vq_nentries >> 1) - 1));
m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
+ m = rte_pktmbuf_prefree_seg(m);
if (likely(m != NULL)) {
free[0] = m;
nb_free = 1;
for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
+ m = rte_pktmbuf_prefree_seg(m);
if (likely(m != NULL)) {
if (likely(m->pool == free[0]->pool))
free[nb_free++] = m;
@@ -123,7 +123,7 @@ virtio_xmit_cleanup(struct virtqueue *vq)
} else {
for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
- m = __rte_pktmbuf_prefree_seg(m);
+ m = rte_pktmbuf_prefree_seg(m);
if (m != NULL)
rte_mempool_put(m->pool, m);
}
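
The switch to rte_pktmbuf_prefree_seg() keeps the existing TX free path, which collects mbufs that share a mempool and returns them in one bulk operation. A generic, self-contained sketch of that batching pattern; plain structs stand in for mbufs and mempools, this is not the DPDK API.

#include <stdio.h>
#include <stddef.h>

#define BATCH 4

struct obj { int pool; int id; };

static void
put_bulk(int pool, struct obj **objs, size_t n)
{
	printf("flush %zu objs to pool %d\n", n, pool);
	(void)objs;
}

/* Group consecutive objects sharing a pool; flush on pool change or when
 * the local batch is full, mirroring the TX free loop above. */
static void
free_batched(struct obj **objs, size_t n)
{
	struct obj *batch[BATCH];
	size_t nb = 0;

	for (size_t i = 0; i < n; i++) {
		if (nb && (batch[0]->pool != objs[i]->pool || nb == BATCH)) {
			put_bulk(batch[0]->pool, batch, nb);
			nb = 0;
		}
		batch[nb++] = objs[i];
	}
	if (nb)
		put_bulk(batch[0]->pool, batch, nb);
}

int
main(void)
{
	struct obj o[5] = { {1, 0}, {1, 1}, {2, 2}, {2, 3}, {1, 4} };
	struct obj *p[5] = { &o[0], &o[1], &o[2], &o[3], &o[4] };

	free_batched(p, 5); /* 2 to pool 1, 2 to pool 2, 1 to pool 1 */
	return 0;
}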
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 793eefbe..ecc62ada 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -72,12 +72,13 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_used_elem *rused;
struct rte_mbuf **sw_ring;
struct rte_mbuf **sw_ring_end;
- uint16_t nb_pkts_received;
+ uint16_t nb_pkts_received = 0;
uint8x16_t shuf_msk1 = {
0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
@@ -106,6 +107,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
0, 0
};
+ if (unlikely(hw->started == 0))
+ return nb_pkts_received;
+
if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
return 0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 87bb5c63..7cf0f8b8 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -74,12 +74,13 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
{
struct virtnet_rx *rxvq = rx_queue;
struct virtqueue *vq = rxvq->vq;
+ struct virtio_hw *hw = vq->hw;
uint16_t nb_used;
uint16_t desc_idx;
struct vring_used_elem *rused;
struct rte_mbuf **sw_ring;
struct rte_mbuf **sw_ring_end;
- uint16_t nb_pkts_received;
+ uint16_t nb_pkts_received = 0;
__m128i shuf_msk1, shuf_msk2, len_adjust;
shuf_msk1 = _mm_set_epi8(
@@ -109,6 +110,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
0, (uint16_t)-vq->hw->vtnet_hdr_size,
0, 0);
+ if (unlikely(hw->started == 0))
+ return nb_pkts_received;
+
if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
return 0;
diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index 7adb55f5..5c983bd4 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -42,8 +42,6 @@
#include "../virtio_logs.h"
#include "../virtqueue.h"
-#define VHOST_MEMORY_MAX_NREGIONS 8
-
struct vhost_vring_state {
unsigned int index;
unsigned int num;
@@ -98,6 +96,8 @@ enum vhost_user_request {
VHOST_USER_MAX
};
+const char * const vhost_msg_strings[VHOST_USER_MAX];
+
struct vhost_memory_region {
uint64_t guest_phys_addr;
uint64_t memory_size; /* bytes */
@@ -105,42 +105,19 @@ struct vhost_memory_region {
uint64_t mmap_offset;
};
-struct vhost_memory {
- uint32_t nregions;
- uint32_t padding;
- struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
-};
-
-struct vhost_user_msg {
- enum vhost_user_request request;
+struct virtio_user_dev;
-#define VHOST_USER_VERSION_MASK 0x3
-#define VHOST_USER_REPLY_MASK (0x1 << 2)
- uint32_t flags;
- uint32_t size; /* the following payload size */
- union {
-#define VHOST_USER_VRING_IDX_MASK 0xff
-#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
- uint64_t u64;
- struct vhost_vring_state state;
- struct vhost_vring_addr addr;
- struct vhost_memory memory;
- } payload;
- int fds[VHOST_MEMORY_MAX_NREGIONS];
-} __attribute((packed));
-
-#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
-#define VHOST_USER_PAYLOAD_SIZE \
- (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
-
-/* The version of the protocol we support */
-#define VHOST_USER_VERSION 0x1
-
-#define VHOST_USER_F_PROTOCOL_FEATURES 30
-#define VHOST_USER_MQ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)
+struct virtio_user_backend_ops {
+ int (*setup)(struct virtio_user_dev *dev);
+ int (*send_request)(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg);
+ int (*enable_qp)(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable);
+};
-int vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg);
-int vhost_user_setup(const char *path);
-int vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable);
+struct virtio_user_backend_ops ops_user;
+struct virtio_user_backend_ops ops_kernel;
#endif
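
The new virtio_user_backend_ops indirection lets the same code drive either a vhost-user socket or the vhost-net kernel module. Backend selection later in this patch keys off whether the given path is a unix socket; a standalone sketch of that check, reusing the same stat()-based test the driver adds as is_vhost_user_by_type().

#include <stdio.h>
#include <sys/stat.h>

/* A unix socket means a vhost-user backend; anything else
 * (e.g. /dev/vhost-net) is treated as vhost-kernel. */
static int
is_vhost_user_by_type(const char *path)
{
	struct stat sb;

	if (stat(path, &sb) == -1)
		return 0;
	return S_ISSOCK(sb.st_mode);
}

int
main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/vhost-net";

	printf("%s -> %s backend\n", path,
	       is_vhost_user_by_type(path) ? "vhost-user" : "vhost-kernel");
	return 0;
}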
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel.c b/drivers/net/virtio/virtio_user/vhost_kernel.c
new file mode 100644
index 00000000..68d28b13
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_kernel.c
@@ -0,0 +1,403 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include <rte_memory.h>
+#include <rte_eal_memconfig.h>
+
+#include "vhost.h"
+#include "virtio_user_dev.h"
+#include "vhost_kernel_tap.h"
+
+struct vhost_memory_kernel {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[0];
+};
+
+/* vhost kernel ioctls */
+#define VHOST_VIRTIO 0xAF
+#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
+#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
+#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
+#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory_kernel)
+#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
+#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
+#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
+#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
+#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
+#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
+#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
+#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
+#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
+
+static uint64_t max_regions = 64;
+
+static void
+get_vhost_kernel_max_regions(void)
+{
+ int fd;
+ char buf[20] = {'\0'};
+
+ fd = open("/sys/module/vhost/parameters/max_mem_regions", O_RDONLY);
+ if (fd < 0)
+ return;
+
+ if (read(fd, buf, sizeof(buf) - 1) > 0)
+ max_regions = strtoull(buf, NULL, 10);
+
+ close(fd);
+}
+
+static uint64_t vhost_req_user_to_kernel[] = {
+ [VHOST_USER_SET_OWNER] = VHOST_SET_OWNER,
+ [VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
+ [VHOST_USER_SET_FEATURES] = VHOST_SET_FEATURES,
+ [VHOST_USER_GET_FEATURES] = VHOST_GET_FEATURES,
+ [VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
+ [VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
+ [VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
+ [VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
+ [VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
+ [VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
+ [VHOST_USER_SET_MEM_TABLE] = VHOST_SET_MEM_TABLE,
+};
+
+/* By default, the vhost kernel module allows 64 regions, but DPDK allows
+ * 256 memory segments. To work around this, the function below merges
+ * virtually adjacent memsegs into one region.
+ */
+static struct vhost_memory_kernel *
+prepare_vhost_memory_kernel(void)
+{
+ uint32_t i, j, k = 0;
+ struct rte_memseg *seg;
+ struct vhost_memory_region *mr;
+ struct vhost_memory_kernel *vm;
+
+ vm = malloc(sizeof(struct vhost_memory_kernel) +
+ max_regions *
+ sizeof(struct vhost_memory_region));
+ if (!vm)
+ return NULL;
+
+ for (i = 0; i < RTE_MAX_MEMSEG; ++i) {
+ seg = &rte_eal_get_configuration()->mem_config->memseg[i];
+ if (!seg->addr)
+ break;
+
+ int new_region = 1;
+
+ for (j = 0; j < k; ++j) {
+ mr = &vm->regions[j];
+
+ if (mr->userspace_addr + mr->memory_size ==
+ (uint64_t)(uintptr_t)seg->addr) {
+ mr->memory_size += seg->len;
+ new_region = 0;
+ break;
+ }
+
+ if ((uint64_t)(uintptr_t)seg->addr + seg->len ==
+ mr->userspace_addr) {
+ mr->guest_phys_addr =
+ (uint64_t)(uintptr_t)seg->addr;
+ mr->userspace_addr =
+ (uint64_t)(uintptr_t)seg->addr;
+ mr->memory_size += seg->len;
+ new_region = 0;
+ break;
+ }
+ }
+
+ if (new_region == 0)
+ continue;
+
+ mr = &vm->regions[k++];
+ /* use vaddr here! */
+ mr->guest_phys_addr = (uint64_t)(uintptr_t)seg->addr;
+ mr->userspace_addr = (uint64_t)(uintptr_t)seg->addr;
+ mr->memory_size = seg->len;
+ mr->mmap_offset = 0;
+
+ if (k >= max_regions) {
+ free(vm);
+ return NULL;
+ }
+ }
+
+ vm->nregions = k;
+ vm->padding = 0;
+ return vm;
+}
+
+/* With the features below, vhost kernel does not need to do checksum or TSO;
+ * that information is passed to virtio_user through the virtio net header.
+ */
+#define VHOST_KERNEL_GUEST_OFFLOADS_MASK \
+ ((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO))
+
+/* With the features below, for flows from virtio_user to vhost kernel:
+ * (1) if a flow goes up through the kernel networking stack, the stack does
+ * not need to verify the checksum, which saves CPU cycles;
+ * (2) if a flow goes through a Linux bridge and out via an interface
+ * (kernel driver), checksum and TSO are done by GSO in the kernel or even
+ * offloaded to the real physical device.
+ */
+#define VHOST_KERNEL_HOST_OFFLOADS_MASK \
+ ((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_CSUM))
+
+static int
+tap_supporte_mq(void)
+{
+ int tapfd;
+ unsigned int tap_features;
+
+ tapfd = open(PATH_NET_TUN, O_RDWR);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s: %s",
+ PATH_NET_TUN, strerror(errno));
+ return -1;
+ }
+
+ if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
+ PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+ close(tapfd);
+ return -1;
+ }
+
+ close(tapfd);
+ return tap_features & IFF_MULTI_QUEUE;
+}
+
+static int
+vhost_kernel_ioctl(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg)
+{
+ int ret = -1;
+ unsigned int i;
+ uint64_t req_kernel;
+ struct vhost_memory_kernel *vm = NULL;
+ int vhostfd;
+ unsigned int queue_sel;
+
+ PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
+
+ req_kernel = vhost_req_user_to_kernel[req];
+
+ if (req_kernel == VHOST_SET_MEM_TABLE) {
+ vm = prepare_vhost_memory_kernel();
+ if (!vm)
+ return -1;
+ arg = (void *)vm;
+ }
+
+ if (req_kernel == VHOST_SET_FEATURES) {
+ /* We don't need memory protection here */
+ *(uint64_t *)arg &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
+
+ /* VHOST kernel does not know about below flags */
+ *(uint64_t *)arg &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *(uint64_t *)arg &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+ *(uint64_t *)arg &= ~(1ULL << VIRTIO_NET_F_MQ);
+ }
+
+ switch (req_kernel) {
+ case VHOST_SET_VRING_NUM:
+ case VHOST_SET_VRING_ADDR:
+ case VHOST_SET_VRING_BASE:
+ case VHOST_GET_VRING_BASE:
+ case VHOST_SET_VRING_KICK:
+ case VHOST_SET_VRING_CALL:
+ queue_sel = *(unsigned int *)arg;
+ vhostfd = dev->vhostfds[queue_sel / 2];
+ *(unsigned int *)arg = queue_sel % 2;
+ PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
+ vhostfd, *(unsigned int *)arg);
+ break;
+ default:
+ vhostfd = -1;
+ }
+ if (vhostfd == -1) {
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ if (dev->vhostfds[i] < 0)
+ continue;
+
+ ret = ioctl(dev->vhostfds[i], req_kernel, arg);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ ret = ioctl(vhostfd, req_kernel, arg);
+ }
+
+ if (!ret && req_kernel == VHOST_GET_FEATURES) {
+ /* with tap as the backend, all these features are supported
+ * but not claimed by vhost-net, so we add them back when
+ * reporting to the upper layer.
+ */
+ *((uint64_t *)arg) |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
+ *((uint64_t *)arg) |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
+
+ /* vhost_kernel will not declare this feature, but it does
+ * support multi-queue.
+ */
+ if (tap_supporte_mq())
+ *(uint64_t *)arg |= (1ull << VIRTIO_NET_F_MQ);
+ }
+
+ if (vm)
+ free(vm);
+
+ if (ret < 0)
+ PMD_DRV_LOG(ERR, "%s failed: %s",
+ vhost_msg_strings[req], strerror(errno));
+
+ return ret;
+}
+
+/**
+ * Set up environment to talk with a vhost kernel backend.
+ *
+ * @return
+ * - (-1) if fail to set up;
+ * - (>=0) if successful.
+ */
+static int
+vhost_kernel_setup(struct virtio_user_dev *dev)
+{
+ int vhostfd;
+ uint32_t i;
+
+ get_vhost_kernel_max_regions();
+
+ for (i = 0; i < dev->max_queue_pairs; ++i) {
+ vhostfd = open(dev->path, O_RDWR);
+ if (vhostfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s, %s",
+ dev->path, strerror(errno));
+ return -1;
+ }
+
+ dev->vhostfds[i] = vhostfd;
+ }
+
+ return 0;
+}
+
+static int
+vhost_kernel_set_backend(int vhostfd, int tapfd)
+{
+ struct vhost_vring_file f;
+
+ f.fd = tapfd;
+ f.index = 0;
+ if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
+ PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
+ strerror(errno));
+ return -1;
+ }
+
+ f.index = 1;
+ if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
+ PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable)
+{
+ int hdr_size;
+ int vhostfd;
+ int tapfd;
+ int req_mq = (dev->max_queue_pairs > 1);
+
+ vhostfd = dev->vhostfds[pair_idx];
+
+ if (!enable) {
+ if (dev->tapfds[pair_idx] >= 0) {
+ close(dev->tapfds[pair_idx]);
+ dev->tapfds[pair_idx] = -1;
+ }
+ return vhost_kernel_set_backend(vhostfd, -1);
+ } else if (dev->tapfds[pair_idx] >= 0) {
+ return 0;
+ }
+
+ if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
+ (dev->features & (1ULL << VIRTIO_F_VERSION_1)))
+ hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ else
+ hdr_size = sizeof(struct virtio_net_hdr);
+
+ tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
+ return -1;
+ }
+
+ if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
+ PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
+ close(tapfd);
+ return -1;
+ }
+
+ dev->tapfds[pair_idx] = tapfd;
+ return 0;
+}
+
+struct virtio_user_backend_ops ops_kernel = {
+ .setup = vhost_kernel_setup,
+ .send_request = vhost_kernel_ioctl,
+ .enable_qp = vhost_kernel_enable_queue_pair
+};
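
vhost_kernel_ioctl() above translates vhost-user request codes into ioctls issued on /dev/vhost-net file descriptors. A minimal standalone sketch of the simplest such call, querying the feature bits; it assumes the vhost-net module is loaded and the process has sufficient privileges, and uses uint64_t in place of the kernel's __u64.

#include <stdio.h>
#include <inttypes.h>
#include <fcntl.h>
#include <unistd.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>

/* Same ioctl encoding the driver defines for the vhost-net module. */
#define VHOST_VIRTIO       0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, uint64_t)

int
main(void)
{
	uint64_t features;
	int fd = open("/dev/vhost-net", O_RDWR); /* needs vhost-net loaded */

	if (fd < 0) {
		fprintf(stderr, "open /dev/vhost-net: %s\n", strerror(errno));
		return 1;
	}
	if (ioctl(fd, VHOST_GET_FEATURES, &features) < 0) {
		fprintf(stderr, "VHOST_GET_FEATURES: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	printf("vhost-net features: 0x%" PRIx64 "\n", features);
	close(fd);
	return 0;
}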
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.c b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
new file mode 100644
index 00000000..f585de8c
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.c
@@ -0,0 +1,133 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <net/if.h>
+#include <errno.h>
+#include <string.h>
+#include <limits.h>
+
+#include "vhost_kernel_tap.h"
+#include "../virtio_logs.h"
+
+int
+vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq)
+{
+ unsigned int tap_features;
+ int sndbuf = INT_MAX;
+ struct ifreq ifr;
+ int tapfd;
+ unsigned int offload =
+ TUN_F_CSUM |
+ TUN_F_TSO4 |
+ TUN_F_TSO6 |
+ TUN_F_TSO_ECN |
+ TUN_F_UFO;
+
+ /* TODO:
+ * 1. verify we can get/set vnet_hdr_len, tap_probe_vnet_hdr_len
+ * 2. get the number of memory regions from the vhost module parameter
+ * max_mem_regions, supported by newer Linux kernel versions
+ */
+ tapfd = open(PATH_NET_TUN, O_RDWR);
+ if (tapfd < 0) {
+ PMD_DRV_LOG(ERR, "fail to open %s: %s",
+ PATH_NET_TUN, strerror(errno));
+ return -1;
+ }
+
+ /* Construct ifr */
+ memset(&ifr, 0, sizeof(ifr));
+ ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+
+ if (ioctl(tapfd, TUNGETFEATURES, &tap_features) == -1) {
+ PMD_DRV_LOG(ERR, "TUNGETFEATURES failed: %s", strerror(errno));
+ goto error;
+ }
+ if (tap_features & IFF_ONE_QUEUE)
+ ifr.ifr_flags |= IFF_ONE_QUEUE;
+
+ /* Let tap, instead of vhost-net, handle the vnet header, as the latter
+ * does not support offloading; in this case, we should not set the
+ * VHOST_NET_F_VIRTIO_NET_HDR feature bit.
+ */
+ if (tap_features & IFF_VNET_HDR) {
+ ifr.ifr_flags |= IFF_VNET_HDR;
+ } else {
+ PMD_DRV_LOG(ERR, "TAP does not support IFF_VNET_HDR");
+ goto error;
+ }
+
+ if (req_mq)
+ ifr.ifr_flags |= IFF_MULTI_QUEUE;
+
+ if (*p_ifname)
+ strncpy(ifr.ifr_name, *p_ifname, IFNAMSIZ);
+ else
+ strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);
+ if (ioctl(tapfd, TUNSETIFF, (void *)&ifr) == -1) {
+ PMD_DRV_LOG(ERR, "TUNSETIFF failed: %s", strerror(errno));
+ goto error;
+ }
+
+ fcntl(tapfd, F_SETFL, O_NONBLOCK);
+
+ if (ioctl(tapfd, TUNSETVNETHDRSZ, &hdr_size) < 0) {
+ PMD_DRV_LOG(ERR, "TUNSETVNETHDRSZ failed: %s", strerror(errno));
+ goto error;
+ }
+
+ if (ioctl(tapfd, TUNSETSNDBUF, &sndbuf) < 0) {
+ PMD_DRV_LOG(ERR, "TUNSETSNDBUF failed: %s", strerror(errno));
+ goto error;
+ }
+
+ /* TODO: before setting the offload capabilities, we should (1) check the
+ * negotiated features to see whether offloading is necessary; (2) query
+ * tap to see if it supports those offload capabilities.
+ */
+ if (ioctl(tapfd, TUNSETOFFLOAD, offload) != 0)
+ PMD_DRV_LOG(ERR, "TUNSETOFFLOAD ioctl() failed: %s",
+ strerror(errno));
+
+ if (!(*p_ifname))
+ *p_ifname = strdup(ifr.ifr_name);
+
+ return tapfd;
+error:
+ close(tapfd);
+ return -1;
+}
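
vhost_kernel_open_tap() above defines the TUN ioctls itself to avoid a kernel-header dependency. For a quick standalone test the same first steps can be done with <linux/if_tun.h>; the hedged sketch below only creates the interface and skips the vnet-header and offload setup the driver performs. It typically needs CAP_NET_ADMIN.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/if_tun.h>  /* TUNSETIFF, IFF_TAP, IFF_NO_PI */

int
main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		fprintf(stderr, "open /dev/net/tun: %s\n", strerror(errno));
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;   /* raw frames, no packet info */
	strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);

	if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
		fprintf(stderr, "TUNSETIFF: %s\n", strerror(errno));
		close(fd);
		return 1;
	}
	printf("created %s\n", ifr.ifr_name);  /* kernel fills in e.g. tap0 */
	close(fd);
	return 0;
}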
diff --git a/drivers/net/virtio/virtio_user/vhost_kernel_tap.h b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
new file mode 100644
index 00000000..eae340cc
--- /dev/null
+++ b/drivers/net/virtio/virtio_user/vhost_kernel_tap.h
@@ -0,0 +1,67 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/ioctl.h>
+
+/* TUN ioctls */
+#define TUNSETIFF _IOW('T', 202, int)
+#define TUNGETFEATURES _IOR('T', 207, unsigned int)
+#define TUNSETOFFLOAD _IOW('T', 208, unsigned int)
+#define TUNGETIFF _IOR('T', 210, unsigned int)
+#define TUNSETSNDBUF _IOW('T', 212, int)
+#define TUNGETVNETHDRSZ _IOR('T', 215, int)
+#define TUNSETVNETHDRSZ _IOW('T', 216, int)
+#define TUNSETQUEUE _IOW('T', 217, int)
+#define TUNSETVNETLE _IOW('T', 220, int)
+#define TUNSETVNETBE _IOW('T', 222, int)
+
+/* TUNSETIFF ifr flags */
+#define IFF_TAP 0x0002
+#define IFF_NO_PI 0x1000
+#define IFF_ONE_QUEUE 0x2000
+#define IFF_VNET_HDR 0x4000
+#define IFF_MULTI_QUEUE 0x0100
+#define IFF_ATTACH_QUEUE 0x0200
+#define IFF_DETACH_QUEUE 0x0400
+
+/* Features for GSO (TUNSETOFFLOAD). */
+#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
+#define TUN_F_TSO4 0x02 /* I can handle TSO for IPv4 packets */
+#define TUN_F_TSO6 0x04 /* I can handle TSO for IPv6 packets */
+#define TUN_F_TSO_ECN 0x08 /* I can handle TSO with ECN bits. */
+#define TUN_F_UFO 0x10 /* I can handle UFO packets */
+
+/* Constants */
+#define PATH_NET_TUN "/dev/net/tun"
+
+int vhost_kernel_open_tap(char **p_ifname, int hdr_size, int req_mq);
diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c
index 082e8217..4ad7b21b 100644
--- a/drivers/net/virtio/virtio_user/vhost_user.c
+++ b/drivers/net/virtio/virtio_user/vhost_user.c
@@ -41,6 +41,39 @@
#include <errno.h>
#include "vhost.h"
+#include "virtio_user_dev.h"
+
+/* The version of the protocol we support */
+#define VHOST_USER_VERSION 0x1
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+struct vhost_memory {
+ uint32_t nregions;
+ uint32_t padding;
+ struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
+};
+
+struct vhost_user_msg {
+ enum vhost_user_request request;
+
+#define VHOST_USER_VERSION_MASK 0x3
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+#define VHOST_USER_VRING_IDX_MASK 0xff
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
+ uint64_t u64;
+ struct vhost_vring_state state;
+ struct vhost_vring_addr addr;
+ struct vhost_memory memory;
+ } payload;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+} __attribute((packed));
+
+#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
+#define VHOST_USER_PAYLOAD_SIZE \
+ (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
static int
vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num)
@@ -223,24 +256,25 @@ prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
static struct vhost_user_msg m;
-static const char * const vhost_msg_strings[] = {
- [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
- [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
- [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
- [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
- [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
- [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
- [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
- [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
- [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
- [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
- [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
- [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
- NULL,
+const char * const vhost_msg_strings[] = {
+ [VHOST_USER_SET_OWNER] = "VHOST_SET_OWNER",
+ [VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER",
+ [VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES",
+ [VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES",
+ [VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL",
+ [VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM",
+ [VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE",
+ [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
+ [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
+ [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
+ [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
+ [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
};
-int
-vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
+static int
+vhost_user_sock(struct virtio_user_dev *dev,
+ enum vhost_user_request req,
+ void *arg)
{
struct vhost_user_msg msg;
struct vhost_vring_file *file = 0;
@@ -248,9 +282,9 @@ vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
int fds[VHOST_MEMORY_MAX_NREGIONS];
int fd_num = 0;
int i, len;
+ int vhostfd = dev->vhostfd;
RTE_SET_USED(m);
- RTE_SET_USED(vhost_msg_strings);
PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
@@ -371,15 +405,13 @@ vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg)
/**
* Set up environment to talk with a vhost user backend.
- * @param path
- * - The path to vhost user unix socket file.
*
* @return
- * - (-1) if fail to set up;
- * - (>=0) if successful, and it is the fd to vhostfd.
+ * - (-1) if fail;
+ * - (0) if succeed.
*/
-int
-vhost_user_setup(const char *path)
+static int
+vhost_user_setup(struct virtio_user_dev *dev)
{
int fd;
int flag;
@@ -397,18 +429,21 @@ vhost_user_setup(const char *path)
memset(&un, 0, sizeof(un));
un.sun_family = AF_UNIX;
- snprintf(un.sun_path, sizeof(un.sun_path), "%s", path);
+ snprintf(un.sun_path, sizeof(un.sun_path), "%s", dev->path);
if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
close(fd);
return -1;
}
- return fd;
+ dev->vhostfd = fd;
+ return 0;
}
-int
-vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable)
+static int
+vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
+ uint16_t pair_idx,
+ int enable)
{
int i;
@@ -418,10 +453,15 @@ vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable)
.num = enable,
};
- if (vhost_user_sock(vhostfd,
- VHOST_USER_SET_VRING_ENABLE, &state))
+ if (vhost_user_sock(dev, VHOST_USER_SET_VRING_ENABLE, &state))
return -1;
}
return 0;
}
+
+struct virtio_user_backend_ops ops_user = {
+ .setup = vhost_user_setup,
+ .send_request = vhost_user_sock,
+ .enable_qp = vhost_user_enable_queue_pair
+};
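
The vhost_user_msg layout moved here becomes private to vhost_user.c; the header size is computed with offsetof so the send path can write the fixed header plus only the payload bytes the message actually uses. A simplified, self-contained illustration of that sizing; the struct below is a cut-down stand-in, not the driver's full message.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Simplified vhost-user frame: header fields, then a payload union. */
struct msg {
	uint32_t request;
	uint32_t flags;
	uint32_t size;  /* payload bytes that follow the header */
	union {
		uint64_t u64;
		struct { uint32_t index, num; } state;
	} payload;
} __attribute__((packed));

#define HDR_SIZE     offsetof(struct msg, payload.u64)
#define PAYLOAD_SIZE (sizeof(struct msg) - HDR_SIZE)

int
main(void)
{
	struct msg m = { .request = 1, .flags = 0x1,
			 .size = sizeof(m.payload.u64) };

	/* Only HDR_SIZE + m.size bytes would be written to the socket. */
	printf("header %zu bytes, max payload %zu, this msg sends %zu\n",
	       HDR_SIZE, PAYLOAD_SIZE, HDR_SIZE + m.size);
	return 0;
}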
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index a38398b8..450404ba 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -39,6 +39,9 @@
#include <sys/mman.h>
#include <unistd.h>
#include <sys/eventfd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "vhost.h"
#include "virtio_user_dev.h"
@@ -51,21 +54,11 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
* firstly because vhost depends on this msg to allocate virtqueue
* pair.
*/
- int callfd;
struct vhost_vring_file file;
- /* May use invalid flag, but some backend leverages kickfd and callfd as
- * criteria to judge if dev is alive. so finally we use real event_fd.
- */
- callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (callfd < 0) {
- PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno));
- return -1;
- }
file.index = queue_sel;
- file.fd = callfd;
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file);
- dev->callfds[queue_sel] = callfd;
+ file.fd = dev->callfds[queue_sel];
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_CALL, &file);
return 0;
}
@@ -73,7 +66,6 @@ virtio_user_create_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
static int
virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
{
- int kickfd;
struct vhost_vring_file file;
struct vhost_vring_state state;
struct vring *vring = &dev->vrings[queue_sel];
@@ -88,26 +80,21 @@ virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel)
state.index = queue_sel;
state.num = vring->num;
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state);
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
+ state.index = queue_sel;
state.num = 0; /* no reservation */
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_BASE, &state);
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_BASE, &state);
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_ADDR, &addr);
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_ADDR, &addr);
/* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes
* lastly because vhost depends on this msg to judge if
* virtio is ready.
*/
- kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
- if (kickfd < 0) {
- PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno));
- return -1;
- }
file.index = queue_sel;
- file.fd = kickfd;
- vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file);
- dev->kickfds[queue_sel] = kickfd;
+ file.fd = dev->kickfds[queue_sel];
+ dev->ops->send_request(dev, VHOST_USER_SET_VRING_KICK, &file);
return 0;
}
@@ -146,22 +133,20 @@ virtio_user_start_device(struct virtio_user_dev *dev)
if (virtio_user_queue_setup(dev, virtio_user_create_queue) < 0)
goto error;
- /* Step 1: set features
- * Make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is enabled,
- * VIRTIO_NET_F_MAC and VIRTIO_NET_F_CTRL_VQ is stripped.
- */
+ /* Step 1: set features */
features = dev->features;
- if (dev->max_queue_pairs > 1)
- features |= VHOST_USER_MQ;
+ /* Strip VIRTIO_NET_F_MAC, as MAC address is handled in vdev init */
features &= ~(1ull << VIRTIO_NET_F_MAC);
+ /* Strip VIRTIO_NET_F_CTRL_VQ, as devices do not really need to know about it */
features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ);
- ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features);
+ features &= ~(1ull << VIRTIO_NET_F_STATUS);
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_FEATURES, &features);
if (ret < 0)
goto error;
PMD_DRV_LOG(INFO, "set features: %" PRIx64, features);
/* Step 2: share memory regions */
- ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL);
+ ret = dev->ops->send_request(dev, VHOST_USER_SET_MEM_TABLE, NULL);
if (ret < 0)
goto error;
@@ -172,7 +157,7 @@ virtio_user_start_device(struct virtio_user_dev *dev)
/* Step 4: enable queues
* we enable the 1st queue pair by default.
*/
- vhost_user_enable_queue_pair(dev->vhostfd, 0, 1);
+ dev->ops->enable_qp(dev, 0, 1);
return 0;
error:
@@ -184,13 +169,8 @@ int virtio_user_stop_device(struct virtio_user_dev *dev)
{
uint32_t i;
- for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
- close(dev->callfds[i]);
- close(dev->kickfds[i]);
- }
-
for (i = 0; i < dev->max_queue_pairs; ++i)
- vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+ dev->ops->enable_qp(dev, i, 0);
return 0;
}
@@ -217,35 +197,170 @@ parse_mac(struct virtio_user_dev *dev, const char *mac)
}
int
-virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac)
+is_vhost_user_by_type(const char *path)
+{
+ struct stat sb;
+
+ if (stat(path, &sb) == -1)
+ return 0;
+
+ return S_ISSOCK(sb.st_mode);
+}
+
+static int
+virtio_user_dev_init_notify(struct virtio_user_dev *dev)
+{
+ uint32_t i, j;
+ int callfd;
+ int kickfd;
+
+ for (i = 0; i < VIRTIO_MAX_VIRTQUEUES; ++i) {
+ if (i >= dev->max_queue_pairs * 2) {
+ dev->kickfds[i] = -1;
+ dev->callfds[i] = -1;
+ continue;
+ }
+
+		/* We could use an invalid flag here, but some backends use
+		 * kickfd and callfd as the criteria to judge whether the
+		 * device is alive, so real eventfds are created.
+		 */
+ callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (callfd < 0) {
+ PMD_DRV_LOG(ERR, "callfd error, %s", strerror(errno));
+ break;
+ }
+ kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
+ if (kickfd < 0) {
+ PMD_DRV_LOG(ERR, "kickfd error, %s", strerror(errno));
+ break;
+ }
+ dev->callfds[i] = callfd;
+ dev->kickfds[i] = kickfd;
+ }
+
+ if (i < VIRTIO_MAX_VIRTQUEUES) {
+ for (j = 0; j <= i; ++j) {
+ close(dev->callfds[j]);
+ close(dev->kickfds[j]);
+ }
+
+ return -1;
+ }
+
+ return 0;
+}
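virtio_user_dev_init_notify() now allocates the callfd/kickfd eventfd pair for every virtqueue up front, instead of creating them on demand while sending SET_VRING_CALL/SET_VRING_KICK. The standalone sketch below (demo code, not part of the patch) shows the eventfd(2) semantics the driver relies on: a kick is an 8-byte counter write, and reading the fd drains the counter.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int kickfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	int callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	uint64_t buf = 1;

	if (kickfd < 0 || callfd < 0)
		return 1;

	/* driver side: tell the backend that new descriptors are available */
	if (write(kickfd, &buf, sizeof(buf)) < 0)
		perror("kick");

	/* the backend would read kickfd; drain it here to show the counter */
	if (read(kickfd, &buf, sizeof(buf)) == (ssize_t)sizeof(buf))
		printf("kick counter drained: %" PRIu64 "\n", buf);

	close(kickfd);
	close(callfd);
	return 0;
}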
+
+static int
+virtio_user_fill_intr_handle(struct virtio_user_dev *dev)
{
uint32_t i;
+ struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
+ if (!eth_dev->intr_handle) {
+ eth_dev->intr_handle = malloc(sizeof(*eth_dev->intr_handle));
+ if (!eth_dev->intr_handle) {
+ PMD_DRV_LOG(ERR, "fail to allocate intr_handle");
+ return -1;
+ }
+ memset(eth_dev->intr_handle, 0, sizeof(*eth_dev->intr_handle));
+ }
+
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ eth_dev->intr_handle->efds[i] = dev->callfds[i];
+ eth_dev->intr_handle->nb_efd = dev->max_queue_pairs;
+ eth_dev->intr_handle->max_intr = dev->max_queue_pairs + 1;
+ eth_dev->intr_handle->type = RTE_INTR_HANDLE_VDEV;
+ if (dev->vhostfd >= 0)
+ eth_dev->intr_handle->fd = dev->vhostfd;
+
+ return 0;
+}
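virtio_user_fill_intr_handle() exports the per-queue callfds through the ethdev interrupt handle so that Rx interrupt mode can wait on them. Independently of the DPDK interrupt API, those callfds are ordinary eventfds, so the waiting side boils down to an epoll loop like the standalone sketch below (demo code, not part of the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

int main(void)
{
	int callfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
	int epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };
	struct epoll_event out;
	uint64_t cnt = 1;

	if (callfd < 0 || epfd < 0)
		return 1;

	ev.data.fd = callfd;
	epoll_ctl(epfd, EPOLL_CTL_ADD, callfd, &ev);

	/* backend side: signal that used buffers are available */
	if (write(callfd, &cnt, sizeof(cnt)) < 0)
		return 1;

	/* driver side: sleep until any registered queue fd fires */
	if (epoll_wait(epfd, &out, 1, 1000) == 1 &&
	    read(out.data.fd, &cnt, sizeof(cnt)) == (ssize_t)sizeof(cnt))
		printf("rx interrupt on fd %d, count %" PRIu64 "\n",
		       out.data.fd, cnt);

	close(epfd);
	close(callfd);
	return 0;
}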
+
+static int
+virtio_user_dev_setup(struct virtio_user_dev *dev)
+{
+ uint32_t q;
+
+ dev->vhostfd = -1;
+ dev->vhostfds = NULL;
+ dev->tapfds = NULL;
+
+ if (is_vhost_user_by_type(dev->path)) {
+ dev->ops = &ops_user;
+ } else {
+ dev->ops = &ops_kernel;
+
+ dev->vhostfds = malloc(dev->max_queue_pairs * sizeof(int));
+ dev->tapfds = malloc(dev->max_queue_pairs * sizeof(int));
+ if (!dev->vhostfds || !dev->tapfds) {
+ PMD_INIT_LOG(ERR, "Failed to malloc");
+ return -1;
+ }
+
+ for (q = 0; q < dev->max_queue_pairs; ++q) {
+ dev->vhostfds[q] = -1;
+ dev->tapfds[q] = -1;
+ }
+ }
+
+ if (dev->ops->setup(dev) < 0)
+ return -1;
+
+ if (virtio_user_dev_init_notify(dev) < 0)
+ return -1;
+
+ if (virtio_user_fill_intr_handle(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Use the macro below to filter the features reported by the vhost backend. */
+#define VIRTIO_USER_SUPPORTED_FEATURES \
+ (1ULL << VIRTIO_NET_F_MAC | \
+ 1ULL << VIRTIO_NET_F_STATUS | \
+ 1ULL << VIRTIO_NET_F_MQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR | \
+ 1ULL << VIRTIO_NET_F_CTRL_VQ | \
+ 1ULL << VIRTIO_NET_F_CTRL_RX | \
+ 1ULL << VIRTIO_NET_F_CTRL_VLAN | \
+ 1ULL << VIRTIO_NET_F_CSUM | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_HOST_TSO6 | \
+ 1ULL << VIRTIO_NET_F_MRG_RXBUF | \
+ 1ULL << VIRTIO_RING_F_INDIRECT_DESC | \
+ 1ULL << VIRTIO_NET_F_GUEST_CSUM | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO4 | \
+ 1ULL << VIRTIO_NET_F_GUEST_TSO6 | \
+ 1ULL << VIRTIO_F_VERSION_1)
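This mask is ANDed into device_features at the end of virtio_user_dev_init(), so only feature bits that the backend offers and virtio-user understands survive. A minimal sketch of that negotiation pattern, using made-up bit positions rather than the real VIRTIO_NET_F_* values:

#include <stdint.h>
#include <stdio.h>

/* illustrative feature bits, not the real virtio values */
#define F_MAC      (1ULL << 5)
#define F_STATUS   (1ULL << 16)
#define F_MQ       (1ULL << 22)
#define SUPPORTED  (F_MAC | F_STATUS)

int main(void)
{
	uint64_t backend = F_MAC | F_MQ;	/* what the backend offers */
	uint64_t negotiated = backend & SUPPORTED;

	/* only F_MAC survives: F_MQ is offered but not supported */
	printf("negotiated: %#llx\n", (unsigned long long)negotiated);
	return 0;
}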
+
+int
+virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
+ int cq, int queue_size, const char *mac, char **ifname)
+{
snprintf(dev->path, PATH_MAX, "%s", path);
dev->max_queue_pairs = queues;
dev->queue_pairs = 1; /* mq disabled by default */
dev->queue_size = queue_size;
dev->mac_specified = 0;
parse_mac(dev, mac);
- dev->vhostfd = -1;
- for (i = 0; i < VIRTIO_MAX_VIRTQUEUES * 2 + 1; ++i) {
- dev->kickfds[i] = -1;
- dev->callfds[i] = -1;
+ if (*ifname) {
+ dev->ifname = *ifname;
+ *ifname = NULL;
}
- dev->vhostfd = vhost_user_setup(dev->path);
- if (dev->vhostfd < 0) {
+ if (virtio_user_dev_setup(dev) < 0) {
PMD_INIT_LOG(ERR, "backend set up fails");
return -1;
}
- if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) {
+ if (dev->ops->send_request(dev, VHOST_USER_SET_OWNER, NULL) < 0) {
PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno));
return -1;
}
- if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES,
+ if (dev->ops->send_request(dev, VHOST_USER_GET_FEATURES,
&dev->device_features) < 0) {
PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno));
return -1;
@@ -268,12 +383,11 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
dev->device_features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR);
}
- if (dev->max_queue_pairs > 1) {
- if (!(dev->features & VHOST_USER_MQ)) {
- PMD_INIT_LOG(ERR, "MQ not supported by the backend");
- return -1;
- }
- }
+	/* The backend will not report this feature, so add it explicitly. */
+ if (is_vhost_user_by_type(dev->path))
+ dev->device_features |= (1ull << VIRTIO_NET_F_STATUS);
+
+ dev->device_features &= VIRTIO_USER_SUPPORTED_FEATURES;
return 0;
}
@@ -281,7 +395,25 @@ virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
void
virtio_user_dev_uninit(struct virtio_user_dev *dev)
{
+ uint32_t i;
+
+ virtio_user_stop_device(dev);
+
+ for (i = 0; i < dev->max_queue_pairs * 2; ++i) {
+ close(dev->callfds[i]);
+ close(dev->kickfds[i]);
+ }
+
close(dev->vhostfd);
+
+ if (dev->vhostfds) {
+ for (i = 0; i < dev->max_queue_pairs; ++i)
+ close(dev->vhostfds[i]);
+ free(dev->vhostfds);
+ free(dev->tapfds);
+ }
+
+ free(dev->ifname);
}
static uint8_t
@@ -297,9 +429,9 @@ virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs)
}
for (i = 0; i < q_pairs; ++i)
- ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 1);
+ ret |= dev->ops->enable_qp(dev, i, 1);
for (i = q_pairs; i < dev->max_queue_pairs; ++i)
- ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 0);
+ ret |= dev->ops->enable_qp(dev, i, 0);
dev->queue_pairs = q_pairs;
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h
index 28fc788e..8361b6bd 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.h
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h
@@ -37,11 +37,20 @@
#include <limits.h>
#include "../virtio_pci.h"
#include "../virtio_ring.h"
+#include "vhost.h"
struct virtio_user_dev {
+ /* for vhost_user backend */
int vhostfd;
- int callfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
- int kickfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+
+ /* for vhost_kernel backend */
+ char *ifname;
+ int *vhostfds;
+ int *tapfds;
+
+ /* for both vhost_user and vhost_kernel */
+ int callfds[VIRTIO_MAX_VIRTQUEUES];
+ int kickfds[VIRTIO_MAX_VIRTQUEUES];
int mac_specified;
uint32_t max_queue_pairs;
uint32_t queue_pairs;
@@ -51,15 +60,18 @@ struct virtio_user_dev {
*/
uint64_t device_features; /* supported features by device */
uint8_t status;
+ uint8_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES * 2 + 1];
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ struct virtio_user_backend_ops *ops;
};
+int is_vhost_user_by_type(const char *path);
int virtio_user_start_device(struct virtio_user_dev *dev);
int virtio_user_stop_device(struct virtio_user_dev *dev);
int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
- int cq, int queue_size, const char *mac);
+ int cq, int queue_size, const char *mac, char **ifname);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
#endif
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 013600e4..280406c0 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -34,10 +34,15 @@
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/socket.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
+#include <rte_ethdev_vdev.h>
#include <rte_vdev.h>
+#include <rte_alarm.h>
#include "virtio_ethdev.h"
#include "virtio_logs.h"
@@ -50,6 +55,17 @@
((struct virtio_user_dev *)(hw)->virtio_user_dev)
static void
+virtio_user_delayed_handler(void *param)
+{
+ struct virtio_hw *hw = (struct virtio_hw *)param;
+ struct rte_eth_dev *dev = &rte_eth_devices[hw->port_id];
+
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+}
+
+static void
virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
void *dst, int length)
{
@@ -63,8 +79,37 @@ virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset,
return;
}
- if (offset == offsetof(struct virtio_net_config, status))
+ if (offset == offsetof(struct virtio_net_config, status)) {
+ char buf[128];
+
+ if (dev->vhostfd >= 0) {
+ int r;
+ int flags;
+
+ flags = fcntl(dev->vhostfd, F_GETFL);
+ fcntl(dev->vhostfd, F_SETFL, flags | O_NONBLOCK);
+ r = recv(dev->vhostfd, buf, 128, MSG_PEEK);
+ if (r == 0 || (r < 0 && errno != EAGAIN)) {
+ dev->status &= (~VIRTIO_NET_S_LINK_UP);
+ PMD_DRV_LOG(ERR, "virtio-user port %u is down",
+ hw->port_id);
+				/* Only client mode is available now; once the
+				 * connection is broken, it can never come back
+				 * up. Since this function may run inside the
+				 * interrupt handler, the callback cannot be
+				 * unregistered here, so set an alarm to do it.
+				 */
+ rte_eal_alarm_set(1,
+ virtio_user_delayed_handler,
+ (void *)hw);
+ } else {
+ dev->status |= VIRTIO_NET_S_LINK_UP;
+ }
+ fcntl(dev->vhostfd, F_SETFL, flags & (~O_NONBLOCK));
+ }
*(uint16_t *)dst = dev->status;
+ }
if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs))
*(uint16_t *)dst = dev->max_queue_pairs;
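The status read now probes the vhost-user socket with a non-blocking recv(..., MSG_PEEK): a return of 0 means the peer closed the connection, while -1 with EAGAIN means the peer is alive but idle. The standalone sketch below demonstrates the same probe on a socketpair (demo code, not part of the patch):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

static int peer_alive(int fd)
{
	char buf[128];
	int flags = fcntl(fd, F_GETFL);
	ssize_t r;

	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
	r = recv(fd, buf, sizeof(buf), MSG_PEEK);
	fcntl(fd, F_SETFL, flags);

	/* 0 = orderly shutdown, <0 with EAGAIN = alive but nothing queued */
	return !(r == 0 || (r < 0 && errno != EAGAIN));
}

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
		return 1;

	printf("alive: %d\n", peer_alive(sv[0]));	/* 1: peer open, no data */
	close(sv[1]);
	printf("alive: %d\n", peer_alive(sv[0]));	/* 0: peer closed */
	return 0;
}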
@@ -82,7 +127,7 @@ virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset,
for (i = 0; i < ETHER_ADDR_LEN; ++i)
dev->mac_addr[i] = ((const uint8_t *)src)[i];
else
- PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n",
+ PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d",
offset, length);
}
@@ -135,17 +180,26 @@ virtio_user_set_features(struct virtio_hw *hw, uint64_t features)
static uint8_t
virtio_user_get_isr(struct virtio_hw *hw __rte_unused)
{
- /* When config interrupt happens, driver calls this function to query
- * what kinds of change happen. Interrupt mode not supported for now.
+	/* rxq interrupts and the config interrupt are separated in
+	 * virtio-user; only config changes are reported here.
*/
- return 0;
+ return VIRTIO_PCI_ISR_CONFIG;
}
static uint16_t
virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused,
uint16_t vec __rte_unused)
{
- return VIRTIO_MSI_NO_VECTOR;
+ return 0;
+}
+
+static uint16_t
+virtio_user_set_queue_irq(struct virtio_hw *hw __rte_unused,
+ struct virtqueue *vq __rte_unused,
+ uint16_t vec)
+{
+ /* pretend we have done that */
+ return vec;
}
/* This function is to get the queue size, aka, number of descs, of a specified
@@ -212,7 +266,7 @@ virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
}
if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0)
- PMD_DRV_LOG(ERR, "failed to kick backend: %s\n",
+ PMD_DRV_LOG(ERR, "failed to kick backend: %s",
strerror(errno));
}
@@ -226,6 +280,7 @@ const struct virtio_pci_ops virtio_user_ops = {
.set_features = virtio_user_set_features,
.get_isr = virtio_user_get_isr,
.set_config_irq = virtio_user_set_config_irq,
+ .set_queue_irq = virtio_user_set_queue_irq,
.get_queue_num = virtio_user_get_queue_num,
.setup_queue = virtio_user_setup_queue,
.del_queue = virtio_user_del_queue,
@@ -243,6 +298,8 @@ static const char *valid_args[] = {
VIRTIO_USER_ARG_PATH,
#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size"
VIRTIO_USER_ARG_QUEUE_SIZE,
+#define VIRTIO_USER_ARG_INTERFACE_NAME "iface"
+ VIRTIO_USER_ARG_INTERFACE_NAME,
NULL
};
@@ -259,6 +316,9 @@ get_string_arg(const char *key __rte_unused,
*(char **)extra_args = strdup(value);
+ if (!*(char **)extra_args)
+ return -ENOMEM;
+
return 0;
}
@@ -274,28 +334,24 @@ get_integer_arg(const char *key __rte_unused,
return 0;
}
+static struct rte_vdev_driver virtio_user_driver;
+
static struct rte_eth_dev *
-virtio_user_eth_dev_alloc(const char *name)
+virtio_user_eth_dev_alloc(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *eth_dev;
struct rte_eth_dev_data *data;
struct virtio_hw *hw;
struct virtio_user_dev *dev;
- eth_dev = rte_eth_dev_allocate(name);
+ eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*hw));
if (!eth_dev) {
PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev");
return NULL;
}
data = eth_dev->data;
-
- hw = rte_zmalloc(NULL, sizeof(*hw), 0);
- if (!hw) {
- PMD_INIT_LOG(ERR, "malloc virtio_hw failed");
- rte_eth_dev_release_port(eth_dev);
- return NULL;
- }
+ hw = eth_dev->data->dev_private;
dev = rte_zmalloc(NULL, sizeof(*dev), 0);
if (!dev) {
@@ -306,17 +362,17 @@ virtio_user_eth_dev_alloc(const char *name)
}
hw->port_id = data->port_id;
+ dev->port_id = data->port_id;
virtio_hw_internal[hw->port_id].vtpci_ops = &virtio_user_ops;
- hw->use_msix = 0;
+	/*
+	 * MSI-X is required to enable LSC (see virtio_init_device).
+	 * Just pretend that MSI-X is supported here.
+	 */
+ hw->use_msix = 1;
hw->modern = 0;
hw->use_simple_rxtx = 0;
hw->virtio_user_dev = dev;
- data->dev_private = hw;
- data->numa_node = SOCKET_ID_ANY;
- data->kdrv = RTE_KDRV_NONE;
data->dev_flags = RTE_ETH_DEV_DETACHABLE;
- eth_dev->pci_dev = NULL;
- eth_dev->driver = NULL;
return eth_dev;
}
@@ -336,7 +392,7 @@ virtio_user_eth_dev_free(struct rte_eth_dev *eth_dev)
* Returns 0 on success.
*/
static int
-virtio_user_pmd_probe(const char *name, const char *params)
+virtio_user_pmd_probe(struct rte_vdev_device *dev)
{
struct rte_kvargs *kvlist = NULL;
struct rte_eth_dev *eth_dev;
@@ -345,16 +401,11 @@ virtio_user_pmd_probe(const char *name, const char *params)
uint64_t cq = VIRTIO_USER_DEF_CQ_EN;
uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ;
char *path = NULL;
+ char *ifname = NULL;
char *mac_addr = NULL;
int ret = -1;
- if (!params || params[0] == '\0') {
- PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
- VIRTIO_USER_ARG_QUEUE_SIZE);
- goto end;
- }
-
- kvlist = rte_kvargs_parse(params, valid_args);
+ kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_args);
if (!kvlist) {
PMD_INIT_LOG(ERR, "error when parsing param");
goto end;
@@ -368,11 +419,27 @@ virtio_user_pmd_probe(const char *name, const char *params)
goto end;
}
} else {
- PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user\n",
+ PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio_user",
VIRTIO_USER_ARG_QUEUE_SIZE);
goto end;
}
+ if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME) == 1) {
+ if (is_vhost_user_by_type(path)) {
+ PMD_INIT_LOG(ERR,
+ "arg %s applies only to vhost-kernel backend",
+ VIRTIO_USER_ARG_INTERFACE_NAME);
+ goto end;
+ }
+
+ if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_INTERFACE_NAME,
+ &get_string_arg, &ifname) < 0) {
+ PMD_INIT_LOG(ERR, "error to parse %s",
+ VIRTIO_USER_ARG_INTERFACE_NAME);
+ goto end;
+ }
+ }
+
if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) {
if (rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC,
&get_string_arg, &mac_addr) < 0) {
@@ -416,21 +483,34 @@ virtio_user_pmd_probe(const char *name, const char *params)
goto end;
}
- eth_dev = virtio_user_eth_dev_alloc(name);
- if (!eth_dev) {
- PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ if (queues > VIRTIO_MAX_VIRTQUEUE_PAIRS) {
+ PMD_INIT_LOG(ERR, "arg %s %" PRIu64 " exceeds the limit %u",
+ VIRTIO_USER_ARG_QUEUES_NUM, queues,
+ VIRTIO_MAX_VIRTQUEUE_PAIRS);
goto end;
}
- hw = eth_dev->data->dev_private;
- if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
- queue_size, mac_addr) < 0) {
- PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
- virtio_user_eth_dev_free(eth_dev);
- goto end;
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ eth_dev = virtio_user_eth_dev_alloc(dev);
+ if (!eth_dev) {
+ PMD_INIT_LOG(ERR, "virtio_user fails to alloc device");
+ goto end;
+ }
+
+ hw = eth_dev->data->dev_private;
+ if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq,
+ queue_size, mac_addr, &ifname) < 0) {
+ PMD_INIT_LOG(ERR, "virtio_user_dev_init fails");
+ virtio_user_eth_dev_free(eth_dev);
+ goto end;
+ }
+ } else {
+ eth_dev = rte_eth_dev_attach_secondary(rte_vdev_device_name(dev));
+ if (!eth_dev)
+ goto end;
}
- /* previously called by rte_eal_pci_probe() for physical dev */
+ /* previously called by rte_pci_probe() for physical dev */
if (eth_virtio_dev_init(eth_dev) < 0) {
PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
virtio_user_eth_dev_free(eth_dev);
@@ -445,21 +525,25 @@ end:
free(path);
if (mac_addr)
free(mac_addr);
+ if (ifname)
+ free(ifname);
return ret;
}
/** Called by rte_eth_dev_detach() */
static int
-virtio_user_pmd_remove(const char *name)
+virtio_user_pmd_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
struct rte_eth_dev *eth_dev;
struct virtio_hw *hw;
struct virtio_user_dev *dev;
- if (!name)
+ if (!vdev)
return -EINVAL;
- PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name);
+ name = rte_vdev_device_name(vdev);
+ PMD_DRV_LOG(INFO, "Un-Initializing %s", name);
eth_dev = rte_eth_dev_allocated(name);
if (!eth_dev)
return -ENODEV;
@@ -490,4 +574,5 @@ RTE_PMD_REGISTER_PARAM_STRING(net_virtio_user,
"mac=<mac addr> "
"cq=<int> "
"queue_size=<int> "
- "queues=<int>");
+ "queues=<int> "
+ "iface=<string>");
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 7f60e3ef..9ad77b8a 100644
--- a/drivers/net/virtio/virtqueue.c
+++ b/drivers/net/virtio/virtqueue.c
@@ -38,17 +38,6 @@
#include "virtio_logs.h"
#include "virtio_pci.h"
-void
-virtqueue_disable_intr(struct virtqueue *vq)
-{
- /*
- * Set VRING_AVAIL_F_NO_INTERRUPT to hint host
- * not to interrupt when it consumes packets
- * Note: this is only considered a hint to the host
- */
- vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
-}
-
/*
* Two types of mbuf to be cleaned:
* 1) mbuf that has been consumed by backend but not used by virtio.
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index b1070e05..2e120861 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -71,8 +71,14 @@ struct rte_mbuf;
/**
* Return the physical address (or virtual address in case of
* virtio-user) of mbuf data buffer.
+ *
+ * The address is first cast to the word size (sizeof(uintptr_t))
+ * before being cast to uint64_t. This makes it work with different
+ * combinations of word size (64-bit and 32-bit) and virtio device
+ * (virtio-pci and virtio-user).
*/
-#define VIRTIO_MBUF_ADDR(mb, vq) (*(uint64_t *)((uintptr_t)(mb) + (vq)->offset))
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+ ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
#else
#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_physaddr)
#endif
@@ -274,7 +280,21 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
/**
* Tell the backend not to interrupt us.
*/
-void virtqueue_disable_intr(struct virtqueue *vq);
+static inline void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/**
+ * Tell the backend to interrupt us.
+ */
+static inline void
+virtqueue_enable_intr(struct virtqueue *vq)
+{
+ vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+}
+
/**
* Dump virtqueue internal structures, for debug purpose only.
*/
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index 23ff1da2..84356ae2 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -76,9 +76,4 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_rxtx.c
SRCS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3_ethdev.c
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += lib/librte_net
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index 68ae8b6d..bfa9622d 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -109,6 +109,9 @@ typedef enum {
VMXNET3_CMD_STOP_EMULATION,
VMXNET3_CMD_LOAD_PLUGIN,
VMXNET3_CMD_ACTIVATE_VF,
+ VMXNET3_CMD_RESERVED3,
+ VMXNET3_CMD_RESERVED4,
+ VMXNET3_CMD_REGISTER_MEMREGS,
VMXNET3_CMD_FIRST_GET = 0xF00D0000,
VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET,
@@ -120,7 +123,9 @@ typedef enum {
VMXNET3_CMD_GET_DID_HI,
VMXNET3_CMD_GET_DEV_EXTRA_INFO,
VMXNET3_CMD_GET_CONF_INTR,
- VMXNET3_CMD_GET_ADAPTIVE_RING_INFO
+ VMXNET3_CMD_GET_ADAPTIVE_RING_INFO,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
+ VMXNET3_CMD_RESERVED5,
} Vmxnet3_Cmd;
/* Adaptive Ring Info Flags */
@@ -402,12 +407,25 @@ typedef union Vmxnet3_GenericDesc {
#define VMXNET3_RING_SIZE_ALIGN 32
#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1)
+/* Tx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_TXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXDATA_DESC_SIZE_MASK (VMXNET3_TXDATA_DESC_SIZE_ALIGN - 1)
+
+/* Rx Data Ring buffer size must be a multiple of 64 */
+#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
+#define VMXNET3_TXDATA_DESC_MIN_SIZE 128
+#define VMXNET3_TXDATA_DESC_MAX_SIZE 2048
+
+#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+
/* a list of reasons for queue stop */
#define VMXNET3_ERR_NOEOP 0x80000000 /* cannot find the EOP desc of a pkt */
@@ -507,7 +525,9 @@ struct Vmxnet3_TxQueueConf {
__le32 compRingSize; /* # of comp desc */
__le32 ddLen; /* size of driver data */
uint8 intrIdx;
- uint8 _pad[7];
+ uint8 _pad[1];
+ __le16 txDataRingDescSize;
+ uint8 _pad2[4];
}
#include "vmware_pack_end.h"
Vmxnet3_TxQueueConf;
@@ -518,12 +538,14 @@ struct Vmxnet3_RxQueueConf {
__le64 rxRingBasePA[2];
__le64 compRingBasePA;
__le64 ddPA; /* driver data */
- __le64 reserved;
+ __le64 rxDataRingBasePA;
__le32 rxRingSize[2]; /* # of rx desc */
__le32 compRingSize; /* # of rx comp desc */
__le32 ddLen; /* size of driver data */
uint8 intrIdx;
- uint8 _pad[7];
+ uint8 _pad1[1];
+ __le16 rxDataRingDescSize; /* size of rx data ring buffer */
+ uint8 _pad2[4];
}
#include "vmware_pack_end.h"
Vmxnet3_RxQueueConf;
@@ -695,12 +717,65 @@ Vmxnet3_RxQueueDesc;
typedef
#include "vmware_pack_begin.h"
+struct Vmxnet3_SetPolling {
+ uint8 enablePolling;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_SetPolling;
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemoryRegion {
+ __le64 startPA;
+ __le32 length;
+ __le16 txQueueBits; /* bit n corresponding to tx queue n */
+ __le16 rxQueueBits; /* bit n corresponding to rx queue n */
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemoryRegion;
+
+#define MAX_MEMORY_REGION_PER_QUEUE 16
+#define MAX_MEMORY_REGION_PER_DEVICE 256
+
+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_MemRegs {
+ __le16 numRegs;
+ __le16 pad[3];
+ Vmxnet3_MemoryRegion memRegs[1];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_MemRegs;
+
+/*
+ * If the command data is <= 16 bytes, use the shared memory directly.
+ * Otherwise, use the variable length configuration descriptor.
+ */
+typedef
+#include "vmware_pack_begin.h"
+union Vmxnet3_CmdInfo {
+ Vmxnet3_VariableLenConfDesc varConf;
+ Vmxnet3_SetPolling setPolling;
+ __le64 data[2];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_CmdInfo;
+
+typedef
+#include "vmware_pack_begin.h"
struct Vmxnet3_DriverShared {
__le32 magic;
__le32 pad; /* make devRead start at 64-bit boundaries */
Vmxnet3_DSDevRead devRead;
__le32 ecr;
- __le32 reserved[5];
+ __le32 reserved;
+
+ union {
+ __le32 reserved1[4];
+ Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of executing the
+ * relevant command
+ */
+ } cu;
}
#include "vmware_pack_end.h"
Vmxnet3_DriverShared;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 8bb13e52..98252bb6 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -56,6 +56,7 @@
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_atomic.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
@@ -69,6 +70,8 @@
#define PROCESS_SYS_EVENTS 0
+#define VMXNET3_TX_MAX_SEG UINT8_MAX
+
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
@@ -138,7 +141,7 @@ gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->driver->pci_drv.driver.name, dev->data->port_id, post_string);
+ dev->data->drv_name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (!reuse) {
@@ -223,6 +226,24 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw)
}
/*
+ * Gets tx data ring descriptor size.
+ */
+static uint16_t
+eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
+{
+ uint16 txdata_desc_size;
+
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
+ txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+ return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
+ txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
+ txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
+ sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
+}
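eth_vmxnet3_txdata_get() above falls back to sizeof(struct Vmxnet3_TxDataDesc) whenever the size reported by the device is out of range or not 64-byte aligned; the alignment test works because the mask is ALIGN - 1 for a power-of-two ALIGN. A standalone sketch of the same validation with local constants (demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define ALIGN		64u
#define ALIGN_MASK	(ALIGN - 1)	/* valid because ALIGN is a power of two */
#define MIN_SIZE	128u
#define MAX_SIZE	2048u
#define FALLBACK	128u

static uint16_t validate_desc_size(uint16_t reported)
{
	if (reported < MIN_SIZE || reported > MAX_SIZE ||
	    (reported & ALIGN_MASK))
		return FALLBACK;
	return reported;
}

int main(void)
{
	printf("%u %u %u\n",
	       (unsigned)validate_desc_size(256),	/* kept: in range and aligned */
	       (unsigned)validate_desc_size(200),	/* rejected: not a multiple of 64 */
	       (unsigned)validate_desc_size(4096));	/* rejected: above the maximum */
	return 0;
}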
+
+/*
* It returns 0 on success.
*/
static int
@@ -237,7 +258,8 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
- pci_dev = eth_dev->pci_dev;
+ eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
/*
* for secondary processes, we don't initialize any further as primary
@@ -247,6 +269,7 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
/* Vendor and Device ID need to be set before init of shared code */
hw->device_id = pci_dev->id.device_id;
@@ -261,13 +284,26 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* Check h/w version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);
- if (ver & 0x1)
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
- else {
- PMD_INIT_LOG(ERR, "Incompatible h/w version, should be 0x1");
+
+ if (ver & (1 << VMXNET3_REV_3)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_3);
+ hw->version = VMXNET3_REV_3 + 1;
+ } else if (ver & (1 << VMXNET3_REV_2)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_2);
+ hw->version = VMXNET3_REV_2 + 1;
+ } else if (ver & (1 << VMXNET3_REV_1)) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+ 1 << VMXNET3_REV_1);
+ hw->version = VMXNET3_REV_1 + 1;
+ } else {
+ PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
return -EIO;
}
+ PMD_INIT_LOG(DEBUG, "Using device version %d\n", hw->version);
+
/* Check UPT version compatibility with driver. */
ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
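The revision negotiation above prefers the highest revision bit set in VMXNET3_REG_VRRS, writes that single bit back, and stores rev + 1 as the driver-visible version. A standalone sketch of the same highest-bit-wins selection (helper name and values are illustrative only):

#include <stdint.h>
#include <stdio.h>

/* revision bit positions mirror REV_1..REV_3 = 0..2 */
static int pick_version(uint32_t vrrs_bits, int max_rev)
{
	int rev;

	for (rev = max_rev; rev >= 0; rev--)
		if (vrrs_bits & (1u << rev))
			return rev + 1;	/* e.g. bit 2 set -> version 3 */
	return 0;			/* no compatible revision */
}

int main(void)
{
	printf("%d\n", pick_version(0x7, 2));	/* bits 0..2 set -> 3 */
	printf("%d\n", pick_version(0x1, 2));	/* only rev 1     -> 1 */
	printf("%d\n", pick_version(0x0, 2));	/* incompatible   -> 0 */
	return 0;
}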
@@ -307,6 +343,14 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
/* allow untagged pkts */
VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);
+ hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);
+
+ hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
+ VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
+ RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
+ hw->rxdata_desc_size);
+
return 0;
}
@@ -326,6 +370,7 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
eth_dev->tx_pkt_burst = NULL;
+ eth_dev->tx_pkt_prepare = NULL;
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
@@ -333,16 +378,23 @@ eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
}
-static struct eth_driver rte_vmxnet3_pmd = {
- .pci_drv = {
- .id_table = pci_id_vmxnet3_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = eth_vmxnet3_dev_init,
- .eth_dev_uninit = eth_vmxnet3_dev_uninit,
- .dev_private_size = sizeof(struct vmxnet3_hw),
+static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
+}
+
+static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
+}
+
+static struct rte_pci_driver rte_vmxnet3_pmd = {
+ .id_table = pci_id_vmxnet3_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_vmxnet3_pci_probe,
+ .remove = eth_vmxnet3_pci_remove,
};
static int
@@ -449,6 +501,92 @@ vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
}
static int
+vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
+{
+ struct vmxnet3_hw *hw = dev->data->dev_private;
+ Vmxnet3_DriverShared *shared = hw->shared;
+ Vmxnet3_CmdInfo *cmdInfo;
+ struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
+ uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
+ uint32_t num, i, j, size;
+
+ if (hw->memRegsPA == 0) {
+ const struct rte_memzone *mz;
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
+ sizeof(Vmxnet3_MemoryRegion);
+
+ mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
+ 1);
+ if (mz == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
+ return -ENOMEM;
+ }
+ memset(mz->addr, 0, mz->len);
+ hw->memRegs = mz->addr;
+ hw->memRegsPA = mz->phys_addr;
+ }
+
+ num = hw->num_rx_queues;
+
+ for (i = 0; i < num; i++) {
+ vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
+
+ mp[i] = rxq->mp;
+ index[i] = 1 << i;
+ }
+
+ /*
+ * The same mempool could be used by multiple queues. In such a case,
+	 * remove duplicate mempool entries; only one entry is kept, with a
+	 * bitmask indicating the queues that use this mempool.
+ */
+ for (i = 1; i < num; i++) {
+ for (j = 0; j < i; j++) {
+ if (mp[i] == mp[j]) {
+ mp[i] = NULL;
+ index[j] |= 1 << i;
+ break;
+ }
+ }
+ }
+
+ j = 0;
+ for (i = 0; i < num; i++) {
+ if (mp[i] == NULL)
+ continue;
+
+ Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];
+
+ mr->startPA =
+ (uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->phys_addr;
+ mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
+ STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
+ mr->txQueueBits = index[i];
+ mr->rxQueueBits = index[i];
+
+ PMD_INIT_LOG(INFO,
+ "index: %u startPA: %" PRIu64 " length: %u, "
+ "rxBits: %x",
+ j, mr->startPA, mr->length, mr->rxQueueBits);
+ j++;
+ }
+ hw->memRegs->numRegs = j;
+ PMD_INIT_LOG(INFO, "numRegs: %u", j);
+
+ size = sizeof(Vmxnet3_MemRegs) +
+ (j - 1) * sizeof(Vmxnet3_MemoryRegion);
+
+ cmdInfo = &shared->cu.cmdInfo;
+ cmdInfo->varConf.confVer = 1;
+ cmdInfo->varConf.confLen = size;
+ cmdInfo->varConf.confPA = hw->memRegsPA;
+
+ return 0;
+}
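vmxnet3_dev_setup_memreg() above collapses queues that share a mempool into a single memory region whose queue bitmasks mark every user of that pool. The deduplication is a simple O(n^2) pass; a standalone sketch of the same idea with plain pointers (demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define NQ 4

int main(void)
{
	int pool_a, pool_b;
	/* queues 0 and 2 share pool_a, queues 1 and 3 share pool_b */
	void *mp[NQ] = { &pool_a, &pool_b, &pool_a, &pool_b };
	uint8_t bits[NQ];
	unsigned int i, j;

	for (i = 0; i < NQ; i++)
		bits[i] = 1u << i;

	/* keep the first occurrence of each pool, OR later queues into it */
	for (i = 1; i < NQ; i++)
		for (j = 0; j < i; j++)
			if (mp[i] && mp[i] == mp[j]) {
				mp[i] = NULL;
				bits[j] |= 1u << i;
				break;
			}

	for (i = 0; i < NQ; i++)
		if (mp[i])
			printf("region for queue %u, queue bits 0x%x\n",
			       i, bits[i]);
	return 0;
}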
+
+static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
struct rte_eth_conf port_conf = dev->data->dev_conf;
@@ -497,6 +635,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
tqd->conf.txRingSize = txq->cmd_ring.size;
tqd->conf.compRingSize = txq->comp_ring.size;
tqd->conf.dataRingSize = txq->data_ring.size;
+ tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
tqd->conf.intrIdx = txq->comp_ring.intr_idx;
tqd->status.stopped = TRUE;
tqd->status.error = 0;
@@ -515,6 +654,10 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
rqd->conf.compRingSize = rxq->comp_ring.size;
rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_3(hw)) {
+ rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
+ rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
+ }
rqd->status.stopped = TRUE;
rqd->status.error = 0;
memset(&rqd->stats, 0, sizeof(rqd->stats));
@@ -583,6 +726,20 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
return -EINVAL;
}
+ /* Setup memory region for rx buffers */
+ ret = vmxnet3_dev_setup_memreg(dev);
+ if (ret == 0) {
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_REGISTER_MEMREGS);
+ ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+ if (ret != 0)
+ PMD_INIT_LOG(DEBUG,
+ "Failed in setup memory region cmd\n");
+ ret = 0;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+ }
+
/* Disable interrupts */
vmxnet3_disable_intr(hw);
@@ -596,6 +753,8 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
return ret;
}
+ hw->adapter_stopped = FALSE;
+
/* Setting proper Rx Mode and issue Rx Mode Update command */
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);
@@ -706,13 +865,16 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
}
static void
-vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
@@ -728,6 +890,8 @@ vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
.nb_max = VMXNET3_TX_RING_MAX_SIZE,
.nb_min = VMXNET3_DEF_TX_RING_SIZE,
.nb_align = 1,
+ .nb_seg_max = VMXNET3_TX_MAX_SEG,
+ .nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
};
dev_info->rx_offload_capa =
@@ -771,7 +935,7 @@ vmxnet3_dev_link_update(struct rte_eth_dev *dev,
__rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
- struct rte_eth_link old, link;
+ struct rte_eth_link old = { 0 }, link;
uint32_t ret;
/* Link status doesn't change for stopped dev */
@@ -960,5 +1124,6 @@ vmxnet3_process_events(struct vmxnet3_hw *hw)
}
#endif
-RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index 7d3b11ee..7a032629 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -34,6 +34,8 @@
#ifndef _VMXNET3_ETHDEV_H_
#define _VMXNET3_ETHDEV_H_
+#include <rte_io.h>
+
#define VMXNET3_MAX_MAC_ADDRS 1
/* UPT feature to negotiate */
@@ -99,6 +101,11 @@ struct vmxnet3_hw {
uint8_t num_rx_queues;
uint8_t bufs_per_pkt;
+ uint8_t version;
+
+ uint16_t txdata_desc_size; /* tx data ring buffer size */
+ uint16_t rxdata_desc_size; /* rx data ring buffer size */
+
Vmxnet3_TxQueueDesc *tqd_start; /* start address of all tx queue desc */
Vmxnet3_RxQueueDesc *rqd_start; /* start address of all rx queue desc */
@@ -112,15 +119,24 @@ struct vmxnet3_hw {
uint64_t rss_confPA;
vmxnet3_mf_table_t *mf_table;
uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ Vmxnet3_MemRegs *memRegs;
+ uint64_t memRegsPA;
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
+#define VMXNET3_REV_3 2 /* Vmxnet3 Rev. 3 */
+#define VMXNET3_REV_2 1 /* Vmxnet3 Rev. 2 */
+#define VMXNET3_REV_1 0 /* Vmxnet3 Rev. 1 */
+
+#define VMXNET3_VERSION_GE_3(hw) ((hw)->version >= VMXNET3_REV_3 + 1)
+#define VMXNET3_VERSION_GE_2(hw) ((hw)->version >= VMXNET3_REV_2 + 1)
+
#define VMXNET3_GET_ADDR_LO(reg) ((uint32_t)(reg))
#define VMXNET3_GET_ADDR_HI(reg) ((uint32_t)(((uint64_t)(reg)) >> 32))
/* Config space read/writes */
-#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+#define VMXNET3_PCI_REG(reg) rte_read32(reg)
static inline uint32_t
vmxnet3_read_addr(volatile void *addr)
@@ -128,9 +144,7 @@ vmxnet3_read_addr(volatile void *addr)
return VMXNET3_PCI_REG(addr);
}
-#define VMXNET3_PCI_REG_WRITE(reg, value) do { \
- VMXNET3_PCI_REG((reg)) = (value); \
-} while(0)
+#define VMXNET3_PCI_REG_WRITE(reg, value) rte_write32((value), (reg))
#define VMXNET3_PCI_BAR0_REG_ADDR(hw, reg) \
((volatile uint32_t *)((char *)(hw)->hw_addr0 + (reg)))
@@ -146,6 +160,20 @@ vmxnet3_read_addr(volatile void *addr)
#define VMXNET3_WRITE_BAR1_REG(hw, reg, value) \
VMXNET3_PCI_REG_WRITE(VMXNET3_PCI_BAR1_REG_ADDR((hw), (reg)), (value))
+static inline uint8_t
+vmxnet3_get_ring_idx(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= hw->num_rx_queues &&
+ rqID < 2 * hw->num_rx_queues) ? 1 : 0;
+}
+
+static inline bool
+vmxnet3_rx_data_ring(struct vmxnet3_hw *hw, uint32 rqID)
+{
+ return (rqID >= 2 * hw->num_rx_queues &&
+ rqID < 3 * hw->num_rx_queues);
+}
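With the Rx data ring added, rqID values fall into three consecutive bands of num_rx_queues entries each: command ring 0, command ring 1, and the data ring. The two inline helpers above classify an rqID by band; the standalone sketch below spells out the same mapping (demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static const char *classify_rqid(uint32_t rqid, uint32_t nq)
{
	if (rqid < nq)
		return "cmd ring 0";
	if (rqid < 2 * nq)
		return "cmd ring 1";
	if (rqid < 3 * nq)
		return "data ring";
	return "invalid";
}

int main(void)
{
	uint32_t nq = 4, id;

	for (id = 0; id < 3 * nq; id++)
		printf("rqID %2u -> %s\n", id, classify_rqid(id, nq));
	return 0;
}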
+
/*
* RX/TX function prototypes
*/
@@ -171,5 +199,7 @@ uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t vmxnet3_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
#endif /* _VMXNET3_ETHDEV_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_ring.h b/drivers/net/vmxnet3/vmxnet3_ring.h
index b50d2b00..d2e8323b 100644
--- a/drivers/net/vmxnet3/vmxnet3_ring.h
+++ b/drivers/net/vmxnet3/vmxnet3_ring.h
@@ -42,6 +42,9 @@
#define VMXNET3_DEF_TX_RING_SIZE 512
#define VMXNET3_DEF_RX_RING_SIZE 128
+/* Default rx data ring desc size */
+#define VMXNET3_DEF_RXDATA_DESC_SIZE 128
+
#define VMXNET3_SUCCESS 0
#define VMXNET3_FAIL -1
@@ -138,9 +141,11 @@ typedef struct vmxnet3_tx_queue {
uint32_t qid;
struct Vmxnet3_TxQueueDesc *shared;
struct vmxnet3_txq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device TX queue index. */
uint8_t port_id; /**< Device port identifier. */
+ uint16_t txdata_desc_size;
} vmxnet3_tx_queue_t;
struct vmxnet3_rxq_stats {
@@ -150,17 +155,28 @@ struct vmxnet3_rxq_stats {
uint64_t rx_buf_alloc_failure;
};
+struct vmxnet3_rx_data_ring {
+ uint8_t *base;
+ uint64_t basePA;
+ uint32_t size;
+};
+
typedef struct vmxnet3_rx_queue {
struct rte_mempool *mp;
struct vmxnet3_hw *hw;
struct vmxnet3_cmd_ring cmd_ring[VMXNET3_RX_CMDRING_SIZE];
struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_data_ring data_ring;
+ uint16_t data_desc_size;
uint32_t qid1;
uint32_t qid2;
+ /* rqID in RCD for buffer from data ring */
+ uint32_t data_ring_qid;
Vmxnet3_RxQueueDesc *shared;
struct rte_mbuf *start_seg;
struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
+ const struct rte_memzone *mz;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
uint8_t port_id; /**< Device port identifier. */
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 93db10fb..e865c675 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -69,6 +69,7 @@
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_net.h>
#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
@@ -76,6 +77,14 @@
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
+#define VMXNET3_TX_OFFLOAD_MASK ( \
+ PKT_TX_VLAN_PKT | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_TCP_SEG)
+
+#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
@@ -192,6 +201,8 @@ vmxnet3_dev_tx_queue_release(void *txq)
vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
+ /* Release the memzone */
+ rte_memzone_free(tq->mz);
}
}
@@ -209,6 +220,9 @@ vmxnet3_dev_rx_queue_release(void *rxq)
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+ /* Release the memzone */
+ rte_memzone_free(rq->mz);
}
}
@@ -235,7 +249,7 @@ vmxnet3_dev_tx_queue_reset(void *txq)
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += tq->txdata_desc_size * data_ring->size;
memset(ring->base, 0, size);
}
@@ -245,8 +259,10 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
{
int i;
vmxnet3_rx_queue_t *rq = rxq;
+ struct vmxnet3_hw *hw = rq->hw;
struct vmxnet3_cmd_ring *ring0, *ring1;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
int size;
if (rq != NULL) {
@@ -271,6 +287,8 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
+ size += rq->data_desc_size * data_ring->size;
memset(ring0->base, 0, size);
}
@@ -350,6 +368,53 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
}
uint16_t
+vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int32_t ret;
+ uint32_t i;
+ uint64_t ol_flags;
+ struct rte_mbuf *m;
+
+ for (i = 0; i != nb_pkts; i++) {
+ m = tx_pkts[i];
+ ol_flags = m->ol_flags;
+
+ /* Non-TSO packet cannot occupy more than
+ * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
+ */
+ if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
+ rte_errno = -EINVAL;
+ return i;
+ }
+
+ /* check that only supported TX offloads are requested. */
+ if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
+ (ol_flags & PKT_TX_L4_MASK) ==
+ PKT_TX_SCTP_CKSUM) {
+ rte_errno = -ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = ret;
+ return i;
+ }
+ }
+
+ return i;
+}
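vmxnet3_prep_pkts() above rejects a burst early when a packet carries an offload flag outside VMXNET3_TX_OFFLOAD_MASK; the not-supported mask is just the XOR of the full offload mask with the supported subset. A standalone sketch of the same mask arithmetic with made-up flag bits (demo code, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* illustrative flag bits, not the real PKT_TX_* values */
#define F_VLAN		(1ULL << 0)
#define F_L4_CKSUM	(1ULL << 1)
#define F_TSO		(1ULL << 2)
#define F_OUTER_IP	(1ULL << 3)

#define ALL_MASK	(F_VLAN | F_L4_CKSUM | F_TSO | F_OUTER_IP)
#define SUPPORTED_MASK	(F_VLAN | F_L4_CKSUM | F_TSO)
#define NOTSUP_MASK	(ALL_MASK ^ SUPPORTED_MASK)	/* == F_OUTER_IP */

int main(void)
{
	uint64_t ok_flags  = F_VLAN | F_TSO;
	uint64_t bad_flags = F_VLAN | F_OUTER_IP;

	printf("ok:  %s\n", (ok_flags & NOTSUP_MASK) ? "reject" : "accept");
	printf("bad: %s\n", (bad_flags & NOTSUP_MASK) ? "reject" : "accept");
	return 0;
}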
+
+uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
@@ -415,10 +480,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
}
if (txm->nb_segs == 1 &&
- rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
struct Vmxnet3_TxDataDesc *tdd;
- tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
+ tdd = (struct Vmxnet3_TxDataDesc *)
+ ((uint8 *)txq->data_ring.base +
+ txq->cmd_ring.next2fill *
+ txq->txdata_desc_size);
copy_size = rte_pktmbuf_pkt_len(txm);
rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
}
@@ -435,12 +503,15 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
* maximum size of mbuf segment size.
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
- if (copy_size)
- gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
- else
+ if (copy_size) {
+ uint64 offset = txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
+ gdesc->txd.addr =
+ rte_cpu_to_le_64(txq->data_ring.basePA +
+ offset);
+ } else {
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+ }
gdesc->dword[2] = dw2 | m_seg->data_len;
gdesc->dword[3] = 0;
@@ -696,7 +767,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
}
idx = rcd->rxdIdx;
- ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
+ ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
RTE_SET_USED(rxd); /* used only for assert when enabled */
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
@@ -762,6 +833,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
goto rcd_done;
}
+ if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
+ uint8_t *rdd = rxq->data_ring.base +
+ idx * rxq->data_desc_size;
+
+ RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
+ rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
+ rdd, rcd->len);
+ }
+
rxq->start_seg = rxm;
vmxnet3_rx_offload(rcd, rxm);
} else {
@@ -816,30 +896,6 @@ rcd_done:
return nb_rx;
}
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- return mz;
-
- return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
-}
-
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
@@ -876,6 +932,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
+ txq->txdata_desc_size = hw->txdata_desc_size;
ring = &txq->cmd_ring;
comp_ring = &txq->comp_ring;
@@ -905,13 +962,15 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
+ size += txq->txdata_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ txq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring initialization */
@@ -955,6 +1014,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
+ struct vmxnet3_rx_data_ring *data_ring;
int size;
uint8_t i;
char mem_name[32];
@@ -975,11 +1035,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
+ rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
+ rxq->data_desc_size = hw->rxdata_desc_size;
rxq->stopped = TRUE;
ring0 = &rxq->cmd_ring[0];
ring1 = &rxq->cmd_ring[1];
comp_ring = &rxq->comp_ring;
+ data_ring = &rxq->data_ring;
/* Rx vmxnet rings length should be between 256-4096 */
if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
@@ -995,6 +1058,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
}
comp_ring->size = ring0->size + ring1->size;
+ data_ring->size = ring0->size;
/* Rx vmxnet rings structure initialization */
ring0->next2fill = 0;
@@ -1008,12 +1072,16 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
+ size += rxq->data_desc_size * data_ring->size;
- mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+ mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+ VMXNET3_RING_BA_ALIGN, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
return -ENOMEM;
}
+ rxq->mz = mz;
memset(mz->addr, 0, mz->len);
/* cmd_ring0 initialization */
@@ -1029,6 +1097,14 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
ring1->size;
+ /* data_ring initialization */
+ if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
+ data_ring->base =
+ (uint8_t *)(comp_ring->base + comp_ring->size);
+ data_ring->basePA = comp_ring->basePA +
+ sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
+ }
+
/* cmd_ring0-cmd_ring1 buf_info allocation */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
diff --git a/drivers/net/xenvirt/Makefile b/drivers/net/xenvirt/Makefile
index 1d05b71b..8b4b8f03 100644
--- a/drivers/net/xenvirt/Makefile
+++ b/drivers/net/xenvirt/Makefile
@@ -54,10 +54,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += rte_eth_xenvirt.c rte_mempool_gntalloc.
#
SYMLINK-y-include += rte_eth_xenvirt.h
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_net
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += lib/librte_cmdline
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c
index c08a0568..7bd29fae 100644
--- a/drivers/net/xenvirt/rte_eth_xenvirt.c
+++ b/drivers/net/xenvirt/rte_eth_xenvirt.c
@@ -70,8 +70,6 @@
/* virtio_idx is increased after new device is created.*/
static int virtio_idx = 0;
-static const char *drivername = "xen virtio PMD";
-
static struct rte_eth_link pmd_link = {
.link_speed = ETH_SPEED_NUM_10G,
.link_duplex = ETH_LINK_FULL_DUPLEX,
@@ -331,13 +329,11 @@ eth_dev_info(struct rte_eth_dev *dev,
struct pmd_internals *internals = dev->data->dev_private;
RTE_SET_USED(internals);
- dev_info->driver_name = drivername;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)2048;
dev_info->max_rx_queues = (uint16_t)1;
dev_info->max_tx_queues = (uint16_t)1;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = NULL;
}
static void
@@ -620,6 +616,7 @@ enum dev_action {
DEV_ATTACH
};
+static struct rte_vdev_driver pmd_xenvirt_drv;
static int
eth_dev_xenvirt_create(const char *name, const char *params,
@@ -673,10 +670,9 @@ eth_dev_xenvirt_create(const char *name, const char *params,
eth_dev->data = data;
eth_dev->dev_ops = &ops;
- eth_dev->data->dev_flags = RTE_PCI_DRV_DETACHABLE;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
eth_dev->data->kdrv = RTE_KDRV_NONE;
- eth_dev->data->drv_name = drivername;
- eth_dev->driver = NULL;
+ eth_dev->data->drv_name = pmd_xenvirt_drv.driver.name;
eth_dev->data->numa_node = numa_node;
eth_dev->rx_pkt_burst = eth_xenvirt_rx;
@@ -729,7 +725,7 @@ eth_dev_xenvirt_free(const char *name, const unsigned numa_node)
/*TODO: Support multiple process model */
static int
-rte_pmd_xenvirt_probe(const char *name, const char *params)
+rte_pmd_xenvirt_probe(struct rte_vdev_device *dev)
{
if (virtio_idx == 0) {
if (xenstore_init() != 0) {
@@ -741,14 +737,15 @@ rte_pmd_xenvirt_probe(const char *name, const char *params)
return -1;
}
}
- eth_dev_xenvirt_create(name, params, rte_socket_id(), DEV_CREATE);
+ eth_dev_xenvirt_create(rte_vdev_device_name(dev),
+ rte_vdev_device_args(dev), rte_socket_id(), DEV_CREATE);
return 0;
}
static int
-rte_pmd_xenvirt_remove(const char *name)
+rte_pmd_xenvirt_remove(struct rte_vdev_device *dev)
{
- eth_dev_xenvirt_free(name, rte_socket_id());
+ eth_dev_xenvirt_free(rte_vdev_device_name(dev), rte_socket_id());
if (virtio_idx == 0) {
if (xenstore_uninit() != 0)